/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/signalfd.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit(void);

static void exit_mm(struct task_struct *tsk);

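/*
 * Drop the task from the pid hashes and the thread-group and task
 * lists.  The group leader additionally carries the PGID/SID links
 * and the per-cpu process count.  Called from __exit_signal() with
 * tasklist_lock write-locked and the sighand lock held.
 */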
static void __unhash_process(struct task_struct *p)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (thread_group_leader(p)) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                __get_cpu_var(process_counts)--;
        }
        list_del_rcu(&p->thread_group);
        remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *sighand;

        BUG_ON(!sig);
        BUG_ON(!atomic_read(&sig->count));

        rcu_read_lock();
        sighand = rcu_dereference(tsk->sighand);
        spin_lock(&sighand->siglock);

        /*
         * Notify that this sighand has been detached. This must
         * be called with the tsk->sighand lock held. Also, this
         * accesses tsk->sighand internally, so it must be called
         * before tsk->sighand is reset.
         */
        signalfd_detach_locked(tsk);

        posix_cpu_timers_exit(tsk);
        if (atomic_dec_and_test(&sig->count))
                posix_cpu_timers_exit_group(tsk);
        else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
                        wake_up_process(sig->group_exit_task);
                        sig->group_exit_task = NULL;
                }
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime = cputime_add(sig->utime, tsk->utime);
                sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
                sig = NULL; /* Marker for below. */
        }

        __unhash_process(tsk);

        tsk->signal = NULL;
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);
        rcu_read_unlock();

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        flush_sigqueue(&tsk->pending);
        if (sig) {
                flush_sigqueue(&sig->shared_pending);
                taskstats_tgid_free(sig);
                __cleanup_signal(sig);
        }
}

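/*
 * RCU callback: the final put_task_struct() is deferred until a grace
 * period has elapsed, so lockless readers that picked up the task
 * pointer under rcu_read_lock() never see it freed under them.
 */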
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        put_task_struct(container_of(rhp, struct task_struct, rcu));
}

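/*
 * Detach the exiting task from the remaining kernel data structures
 * and drop its reference.  If we were the last non-leader thread and
 * the zombie leader turns out to be self-reaping, loop once more to
 * release the leader too.
 */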
void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        atomic_dec(&p->user->processes);
        write_lock_irq(&tasklist_lock);
        ptrace_unlink(p);
        BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
                BUG_ON(leader->exit_signal == -1);
                do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 *
                 * do_notify_parent() will have marked it self-reaping in
                 * that case.
                 */
                zap_leader = (leader->exit_signal == -1);
        }

        write_unlock_irq(&tasklist_lock);
        proc_flush_task(p);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
        struct task_struct *p;
        struct pid *sid = NULL;

        p = pid_task(pgrp, PIDTYPE_PGID);
        if (p == NULL)
                p = pid_task(pgrp, PIDTYPE_PID);
        if (p != NULL)
                sid = task_session(p);

        return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
        struct task_struct *p;
        int ret = 1;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p == ignored_task
                                || p->exit_state
                                || is_init(p->real_parent))
                        continue;
                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p)) {
                        ret = 0;
                        break;
                }
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return ret;     /* (sighing) "Often!" */
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
        int retval = 0;
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->state != TASK_STOPPED)
                        continue;
                retval = 1;
                break;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return retval;
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
        write_lock_irq(&tasklist_lock);

        ptrace_unlink(current);
        /* Reparent to kthreadd */
        remove_parent(current);
        current->real_parent = current->parent = kthreadd_task;
        add_parent(current);

        /* Set the exit signal to SIGCHLD so we signal our new parent on exit */
        current->exit_signal = SIGCHLD;

        if (task_nice(current) < 0)
                set_user_nice(current, 0);
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
        security_task_reparent_to_init(current);
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));
        atomic_inc(&(INIT_USER->__count));
        write_unlock_irq(&tasklist_lock);
        switch_uid(INIT_USER);
}

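/*
 * Move the caller's thread group into the given session and process
 * group, rehashing the PIDTYPE_SID/PIDTYPE_PGID links.  Must be called
 * with tasklist_lock write-locked; set_special_pids() below is the
 * locking wrapper.
 */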
void __set_special_pids(pid_t session, pid_t pgrp)
{
        struct task_struct *curr = current->group_leader;

        if (process_session(curr) != session) {
                detach_pid(curr, PIDTYPE_SID);
                set_signal_session(curr->signal, session);
                attach_pid(curr, PIDTYPE_SID, find_pid(session));
        }
        if (process_group(curr) != pgrp) {
                detach_pid(curr, PIDTYPE_PGID);
                curr->signal->pgrp = pgrp;
                attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
        }
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
        write_lock_irq(&tasklist_lock);
        __set_special_pids(session, pgrp);
        write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        sigdelset(&current->blocked, sig);
        if (!current->mm) {
                /* Kernel threads handle their own signals.
                   Let the signal code know it'll be handled, so
                   that they don't get converted to SIGKILL or
                   just silently dropped */
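                /* The value 2 is a kernel-internal "handled" marker,
                   distinct from SIG_DFL (0) and SIG_IGN (1). */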
                current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *      Put all the gunge required to become a kernel thread without
 *      attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
        va_list args;
        struct fs_struct *fs;
        sigset_t blocked;

        va_start(args, name);
        vsnprintf(current->comm, sizeof(current->comm), name, args);
        va_end(args);

        /*
         * If we were started as result of loading a module, close all of the
         * user space pages.  We don't need them, and if we didn't close them
         * they would be locked into memory.
         */
        exit_mm(current);
        /*
         * We don't want to have TIF_FREEZE set if the system-wide hibernation
         * or suspend transition begins right now.
         */
        current->flags |= PF_NOFREEZE;

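        /*
         * Detach from the caller's session and process group by joining
         * session 1/pgrp 1 (init's), so the daemonized thread no longer
         * receives the caller's job-control signals.
         */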
        set_special_pids(1, 1);
        proc_clear_tty(current);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* Become as one with the init task */

        exit_fs(current);       /* current->fs->count--; */
        fs = init_task.fs;
        current->fs = fs;
        atomic_inc(&fs->count);

        exit_task_namespaces(current);
        current->nsproxy = init_task.nsproxy;
        get_task_namespaces(current);

        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);

        reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

static void close_files(struct files_struct *files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.
         */
        fdt = files_fdtable(files);
        for (;;) {
                unsigned long set;
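                /*
                 * Walk the open-fd bitmap one long at a time: word j
                 * covers descriptors [j*__NFDBITS, (j+1)*__NFDBITS).
                 */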
                i = j * __NFDBITS;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
                                struct file *file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }
}

struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
        }
}

EXPORT_SYMBOL(put_files_struct);

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
        struct files_struct *files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

void exit_files(struct task_struct *tsk)
{
        __exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
        /* No need to hold fs->lock if we are killing it */
        if (atomic_dec_and_test(&fs->count)) {
                dput(fs->root);
                mntput(fs->rootmnt);
                dput(fs->pwd);
                mntput(fs->pwdmnt);
                if (fs->altroot) {
                        dput(fs->altroot);
                        mntput(fs->altrootmnt);
                }
                kmem_cache_free(fs_cachep, fs);
        }
}

void put_fs_struct(struct fs_struct *fs)
{
        __put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
        struct fs_struct *fs = tsk->fs;

        if (fs) {
                task_lock(tsk);
                tsk->fs = NULL;
                task_unlock(tsk);
                __put_fs_struct(fs);
        }
}

void exit_fs(struct task_struct *tsk)
{
        __exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_waiters
         * and clearing tsk->mm.  The core-inducing thread
         * will increment core_waiters for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        if (mm->core_waiters) {
                up_read(&mm->mmap_sem);
                down_write(&mm->mmap_sem);
                if (!--mm->core_waiters)
                        complete(mm->core_startup_done);
                up_write(&mm->mmap_sem);

                wait_for_completion(&mm->core_done);
                down_read(&mm->mmap_sem);
        }
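        /*
         * Take an mm_count reference: tsk->active_mm keeps pointing at
         * this mm for lazy-TLB use, and the final context switch drops
         * it via mmdrop() in finish_task_switch().
         */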
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
        task_unlock(tsk);
        mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
        /*
         * Make sure we're not reparenting to ourselves and that
         * the parent is not a zombie.
         */
        BUG_ON(p == reaper || reaper->exit_state);
        p->real_parent = reaper;
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
        if (p->pdeath_signal)
                /* We already hold the tasklist_lock here.  */
                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

        /* Move the child from its dying parent to the new one.  */
        if (unlikely(traced)) {
                /* Preserve ptrace links if someone else is tracing this child.  */
                list_del_init(&p->ptrace_list);
                if (p->parent != p->real_parent)
                        list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
        } else {
                /* If this child is being traced, then we're the one tracing it
                 * anyway, so let go of it.
                 */
                p->ptrace = 0;
                remove_parent(p);
                p->parent = p->real_parent;
                add_parent(p);

                if (p->state == TASK_TRACED) {
                        /*
                         * If it was at a trace stop, turn it into
                         * a normal stop since it's no longer being
                         * traced.
                         */
                        ptrace_untrace(p);
                }
        }

        /* If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (p->real_parent->group_leader == father->group_leader)
                return;

        /* We don't want people slaying init.  */
        if (p->exit_signal != -1)
                p->exit_signal = SIGCHLD;

        /* If we'd notified the old parent about this child's death,
         * also notify the new parent.
         */
        if (!traced && p->exit_state == EXIT_ZOMBIE &&
            p->exit_signal != -1 && thread_group_empty(p))
                do_notify_parent(p, p->exit_signal);

        /*
         * process group orphan check
         * Case ii: Our child is in a different pgrp
         * than we are, and it was the only connection
         * outside, so the child pgrp is now orphaned.
         */
        if ((task_pgrp(p) != task_pgrp(father)) &&
            (task_session(p) == task_session(father))) {
                struct pid *pgrp = task_pgrp(p);

                if (will_become_orphaned_pgrp(pgrp, NULL) &&
                    has_stopped_jobs(pgrp)) {
                        __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                        __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
                }
        }
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
        struct task_struct *p, *reaper = father;
        struct list_head *_p, *_n;

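        /*
         * Pick the first live thread in our own group as the reaper;
         * if every other thread is already exiting, fall back to this
         * pid namespace's child reaper (init).
         */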
        do {
                reaper = next_thread(reaper);
                if (reaper == father) {
                        reaper = child_reaper(father);
                        break;
                }
        } while (reaper->exit_state);

        /*
         * There are only two places where our children can be:
         *
         * - in our child list
         * - in our ptraced child list
         *
         * Search them and reparent children.
         */
        list_for_each_safe(_p, _n, &father->children) {
                int ptrace;
                p = list_entry(_p, struct task_struct, sibling);

                ptrace = p->ptrace;

                /* if father isn't the real parent, then ptrace must be enabled */
                BUG_ON(father != p->real_parent && !ptrace);

                if (father == p->real_parent) {
                        /* reparent to a reaper; the real father is us */
                        choose_new_parent(p, reaper);
                        reparent_thread(p, father, 0);
                } else {
                        /* reparent ptraced task to its real parent */
                        __ptrace_unlink(p);
                        if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
                            thread_group_empty(p))
                                do_notify_parent(p, p->exit_signal);
                }

                /*
                 * If the ptraced child is a zombie with exit_signal == -1
                 * we must collect it before we exit, or it will remain
                 * a zombie forever, since we prevented it from reaping
                 * itself while it was being traced by us, so that we
                 * could see it in wait4.
                 */
                if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
                        list_add(&p->ptrace_list, to_release);
        }
        list_for_each_safe(_p, _n, &father->ptrace_children) {
                p = list_entry(_p, struct task_struct, ptrace_list);
                choose_new_parent(p, reaper);
                reparent_thread(p, father, 1);
        }
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
        int state;
        struct task_struct *t;
        struct list_head ptrace_dead, *_p, *_n;
        struct pid *pgrp;

        if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
            && !thread_group_empty(tsk)) {
                /*
                 * This occurs when there was a race between our exit
                 * syscall and a group signal choosing us as the one to
                 * wake up.  It could be that we are the only thread
                 * alerted to check for pending signals, but another thread
                 * should be woken now to take the signal since we will not.
                 * Now we'll wake all the threads in the group just to make
                 * sure someone gets all the pending signals.
                 */
                read_lock(&tasklist_lock);
                spin_lock_irq(&tsk->sighand->siglock);
                for (t = next_thread(tsk); t != tsk; t = next_thread(t))
                        if (!signal_pending(t) && !(t->flags & PF_EXITING))
                                recalc_sigpending_and_wake(t);
                spin_unlock_irq(&tsk->sighand->siglock);
                read_unlock(&tasklist_lock);
        }

        write_lock_irq(&tasklist_lock);

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *      as a result of our exiting, and if they have any stopped
         *      jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */

        INIT_LIST_HEAD(&ptrace_dead);
        forget_original_parent(tsk, &ptrace_dead);
        BUG_ON(!list_empty(&tsk->children));
        BUG_ON(!list_empty(&tsk->ptrace_children));

        /*
         * Check to see if any process groups have become orphaned
         * as a result of our exiting, and if they have any stopped
         * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         *
         * Case i: Our father is in a different pgrp than we are
         * and we were the only connection outside, so our pgrp
         * is about to become orphaned.
         */

        t = tsk->real_parent;

        pgrp = task_pgrp(tsk);
        if ((task_pgrp(t) != pgrp) &&
            (task_session(t) == task_session(tsk)) &&
            will_become_orphaned_pgrp(pgrp, tsk) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }

        /* Let father know we died
         *
         * Thread signals are configurable, but you aren't going to use
         * that to send signals to arbitrary processes.
         * That stops right now.
         *
         * If the parent exec id doesn't match the exec id we saved
         * when we started then we know the parent has changed security
         * domain.
         *
         * If our self_exec id doesn't match our parent_exec_id then
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
        if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
            (tsk->parent_exec_id != t->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id)
            && !capable(CAP_KILL))
                tsk->exit_signal = SIGCHLD;

        /* If something other than our normal parent is ptracing us, then
         * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
         * only has special meaning to our real parent.
         */
        if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
                int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
                do_notify_parent(tsk, signal);
        } else if (tsk->ptrace) {
                do_notify_parent(tsk, SIGCHLD);
        }

        state = EXIT_ZOMBIE;
        if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
                state = EXIT_DEAD;
        tsk->exit_state = state;

        write_unlock_irq(&tasklist_lock);

        list_for_each_safe(_p, _n, &ptrace_dead) {
                list_del_init(_p);
                t = list_entry(_p, struct task_struct, ptrace_list);
                release_task(t);
        }

        /* If the process is dead, release it - nobody will wait for it */
        if (state == EXIT_DEAD)
                release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long *n = end_of_stack(current);
        unsigned long free;

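        /*
         * With CONFIG_DEBUG_STACK_USAGE the thread stack is allocated
         * zeroed, so scanning up from the far end for the first nonzero
         * word estimates how much head-room was never touched.
         */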
        while (*n == 0)
                n++;
        free = (unsigned long)n - (unsigned long)end_of_stack(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
                                "left\n",
                                current->comm, free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

fastcall NORET_TYPE void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(atomic_read(&tsk->fs_excl));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");
        if (unlikely(tsk == child_reaper(tsk))) {
                if (tsk->nsproxy->pid_ns != &init_pid_ns)
                        tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
                else
                        panic("Attempted to kill init!");
        }

        if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
                current->ptrace_message = code;
                ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
        }

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                        "Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here. The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not. In the worst case it
                 * loops once more. We pretend that the cleanup was
                 * done as there is no way to return. Either the
                 * OWNER_DIED bit is set by now or we push the blocked
                 * task into wait-forever nirvana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
                if (tsk->io_context)
                        exit_io_context();
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        spin_lock_irq(&tsk->pi_lock);
        tsk->flags |= PF_EXITING;
        spin_unlock_irq(&tsk->pi_lock);

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                                current->comm, current->pid,
                                preempt_count());

        acct_update_integrals(tsk);
        if (tsk->mm) {
                update_hiwater_rss(tsk->mm);
                update_hiwater_vm(tsk->mm);
        }
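        /*
         * signal->live counts the live threads in the group; the thread
         * that drops it to zero does the process-wide teardown below
         * (interval timers, accounting, tty audit, controlling tty).
         */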
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
        }
        acct_collect(code, group_dead);
        if (unlikely(tsk->robust_list))
                exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
        if (unlikely(tsk->compat_robust_list))
                compat_exit_robust_list(tsk);
#endif
        if (group_dead)
                tty_audit_exit();
        if (unlikely(tsk->audit_context))
                audit_free(tsk);

        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        exit_sem(tsk);
        __exit_files(tsk);
        __exit_fs(tsk);
        check_stack_usage();
        exit_thread();
        cpuset_exit(tsk);
        exit_keys(tsk);

        if (group_dead && tsk->signal->leader)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);
        if (tsk->binfmt)
                module_put(tsk->binfmt->module);

        tsk->exit_code = code;
        proc_exit_connector(tsk);
        exit_task_namespaces(tsk);
        exit_notify(tsk);
#ifdef CONFIG_NUMA
        mpol_free(tsk->mempolicy);
        tsk->mempolicy = NULL;
#endif
        /*
         * This must happen late, after the PID is not
         * hashed anymore:
         */
        if (unlikely(!list_empty(&tsk->pi_state_list)))
                exit_pi_state_list(tsk);
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not. In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context();

        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);

        preempt_disable();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;

        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
        do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (current->signal->flags & SIGNAL_GROUP_EXIT)
                exit_code = current->signal->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct signal_struct *const sig = current->signal;
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (sig->flags & SIGNAL_GROUP_EXIT)
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
        do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
        int err;

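        /*
         * wait4() pid semantics: pid > 0 waits for that exact child,
         * pid == 0 for any child in our own process group, pid < -1
         * for any child in process group -pid, and pid == -1 for any
         * child at all.
         */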
        if (pid > 0) {
                if (p->pid != pid)
                        return 0;
        } else if (!pid) {
                if (process_group(p) != process_group(current))
                        return 0;
        } else if (pid != -1) {
                if (process_group(p) != -pid)
                        return 0;
        }

        /*
         * Do not consider detached threads that are
         * not ptraced:
         */
        if (p->exit_signal == -1 && !p->ptrace)
                return 0;

        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
            && !(options & __WALL))
                return 0;
        /*
         * Do not consider thread group leaders that are
         * in a non-empty thread group:
         */
        if (delay_group_leader(p))
                return 2;

        err = security_task_wait(p);
        if (err)
                return err;

        return 1;
}

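/*
 * Fill in the siginfo and rusage for a WNOWAIT-style (noreap) wait:
 * the child's state is reported but the child itself is not reaped.
 */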
1143 static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
1144                                int why, int status,
1145                                struct siginfo __user *infop,
1146                                struct rusage __user *rusagep)
1147 {
1148         int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
1149
1150         put_task_struct(p);
1151         if (!retval)
1152                 retval = put_user(SIGCHLD, &infop->si_signo);
1153         if (!retval)
1154                 retval = put_user(0, &infop->si_errno);
1155         if (!retval)
1156                 retval = put_user((short)why, &infop->si_code);
1157         if (!retval)
1158                 retval = put_user(pid, &infop->si_pid);
1159         if (!retval)
1160                 retval = put_user(uid, &infop->si_uid);
1161         if (!retval)
1162                 retval = put_user(status, &infop->si_status);
1163         if (!retval)
1164                 retval = pid;
1165         return retval;
1166 }
1167
1168 /*
1169  * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
1170  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1171  * the lock and this task is uninteresting.  If we return nonzero, we have
1172  * released the lock and the system call should return.
1173  */
1174 static int wait_task_zombie(struct task_struct *p, int noreap,
1175                             struct siginfo __user *infop,
1176                             int __user *stat_addr, struct rusage __user *ru)
1177 {
1178         unsigned long state;
1179         int retval;
1180         int status;
1181
1182         if (unlikely(noreap)) {
1183                 pid_t pid = p->pid;
1184                 uid_t uid = p->uid;
1185                 int exit_code = p->exit_code;
1186                 int why, status;
1187
1188                 if (unlikely(p->exit_state != EXIT_ZOMBIE))
1189                         return 0;
1190                 if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
1191                         return 0;
1192                 get_task_struct(p);
1193                 read_unlock(&tasklist_lock);
1194                 if ((exit_code & 0x7f) == 0) {
1195                         why = CLD_EXITED;
1196                         status = exit_code >> 8;
1197                 } else {
1198                         why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
1199                         status = exit_code & 0x7f;
1200                 }
1201                 return wait_noreap_copyout(p, pid, uid, why,
1202                                            status, infop, ru);
1203         }
1204
1205         /*
1206          * Try to move the task's state to DEAD
1207          * only one thread is allowed to do this:
1208          */
1209         state = xchg(&p->exit_state, EXIT_DEAD);
1210         if (state != EXIT_ZOMBIE) {
1211                 BUG_ON(state != EXIT_DEAD);
1212                 return 0;
1213         }
1214         if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
1215                 /*
1216                  * This can only happen in a race with a ptraced thread
1217                  * dying on another processor.
1218                  */
1219                 return 0;
1220         }
1221
1222         if (likely(p->real_parent == p->parent) && likely(p->signal)) {
1223                 struct signal_struct *psig;
1224                 struct signal_struct *sig;
1225
1226                 /*
1227                  * The resource counters for the group leader are in its
1228                  * own task_struct.  Those for dead threads in the group
1229                  * are in its signal_struct, as are those for the child
1230                  * processes it has previously reaped.  All these
1231                  * accumulate in the parent's signal_struct c* fields.
1232                  *
1233                  * We don't bother to take a lock here to protect these
1234                  * p->signal fields, because they are only touched by
1235                  * __exit_signal, which runs with tasklist_lock
1236                  * write-locked anyway, and so is excluded here.  We do
1237                  * need to protect the access to p->parent->signal fields,
1238                  * as other threads in the parent group can be right
1239                  * here reaping other children at the same time.
1240                  */
1241                 spin_lock_irq(&p->parent->sighand->siglock);
1242                 psig = p->parent->signal;
1243                 sig = p->signal;
1244                 psig->cutime =
1245                         cputime_add(psig->cutime,
1246                         cputime_add(p->utime,
1247                         cputime_add(sig->utime,
1248                                     sig->cutime)));
1249                 psig->cstime =
1250                         cputime_add(psig->cstime,
1251                         cputime_add(p->stime,
1252                         cputime_add(sig->stime,
1253                                     sig->cstime)));
1254                 psig->cmin_flt +=
1255                         p->min_flt + sig->min_flt + sig->cmin_flt;
1256                 psig->cmaj_flt +=
1257                         p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1258                 psig->cnvcsw +=
1259                         p->nvcsw + sig->nvcsw + sig->cnvcsw;
1260                 psig->cnivcsw +=
1261                         p->nivcsw + sig->nivcsw + sig->cnivcsw;
1262                 psig->cinblock +=
1263                         task_io_get_inblock(p) +
1264                         sig->inblock + sig->cinblock;
1265                 psig->coublock +=
1266                         task_io_get_oublock(p) +
1267                         sig->oublock + sig->coublock;
1268                 spin_unlock_irq(&p->parent->sighand->siglock);
1269         }
1270
1271         /*
1272          * Now we are sure this task is interesting, and no other
1273          * thread can reap it because we set its state to EXIT_DEAD.
1274          */
1275         read_unlock(&tasklist_lock);
1276
1277         retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1278         status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1279                 ? p->signal->group_exit_code : p->exit_code;
1280         if (!retval && stat_addr)
1281                 retval = put_user(status, stat_addr);
1282         if (!retval && infop)
1283                 retval = put_user(SIGCHLD, &infop->si_signo);
1284         if (!retval && infop)
1285                 retval = put_user(0, &infop->si_errno);
1286         if (!retval && infop) {
1287                 int why;
1288
1289                 if ((status & 0x7f) == 0) {
1290                         why = CLD_EXITED;
1291                         status >>= 8;
1292                 } else {
1293                         why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1294                         status &= 0x7f;
1295                 }
1296                 retval = put_user((short)why, &infop->si_code);
1297                 if (!retval)
1298                         retval = put_user(status, &infop->si_status);
1299         }
1300         if (!retval && infop)
1301                 retval = put_user(p->pid, &infop->si_pid);
1302         if (!retval && infop)
1303                 retval = put_user(p->uid, &infop->si_uid);
1304         if (retval) {
1305                 // TODO: is this safe?
1306                 p->exit_state = EXIT_ZOMBIE;
1307                 return retval;
1308         }
1309         retval = p->pid;
1310         if (p->real_parent != p->parent) {
1311                 write_lock_irq(&tasklist_lock);
1312                 /* Double-check with lock held.  */
1313                 if (p->real_parent != p->parent) {
1314                         __ptrace_unlink(p);
1315                         // TODO: is this safe?
1316                         p->exit_state = EXIT_ZOMBIE;
1317                         /*
1318                          * If this is not a detached task, notify the parent.
1319                          * If it's still not detached after that, don't release
1320                          * it now.
1321                          */
1322                         if (p->exit_signal != -1) {
1323                                 do_notify_parent(p, p->exit_signal);
1324                                 if (p->exit_signal != -1)
1325                                         p = NULL;
1326                         }
1327                 }
1328                 write_unlock_irq(&tasklist_lock);
1329         }
1330         if (p != NULL)
1331                 release_task(p);
1332         BUG_ON(!retval);
1333         return retval;
1334 }
1335
1336 /*
1337  * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
1338  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1339  * the lock and this task is uninteresting.  If we return nonzero, we have
1340  * released the lock and the system call should return.
1341  */
1342 static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
1343                              int noreap, struct siginfo __user *infop,
1344                              int __user *stat_addr, struct rusage __user *ru)
1345 {
1346         int retval, exit_code;
1347
1348         if (!p->exit_code)
1349                 return 0;
1350         if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
1351             p->signal && p->signal->group_stop_count > 0)
1352                 /*
1353                  * A group stop is in progress and this is the group leader.
1354                  * We won't report until all threads have stopped.
1355                  */
1356                 return 0;
1357
1358         /*
1359          * Now we are pretty sure this task is interesting.
1360          * Make sure it doesn't get reaped out from under us while we
1361          * give up the lock and then examine it below.  We don't want to
1362          * keep holding onto the tasklist_lock while we call getrusage and
1363          * possibly take page faults for user memory.
1364          */
1365         get_task_struct(p);
1366         read_unlock(&tasklist_lock);
1367
1368         if (unlikely(noreap)) {
1369                 pid_t pid = p->pid;
1370                 uid_t uid = p->uid;
1371                 int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1372
1373                 exit_code = p->exit_code;
1374                 if (unlikely(!exit_code) ||
1375                     unlikely(p->state & TASK_TRACED))
1376                         goto bail_ref;
1377                 return wait_noreap_copyout(p, pid, uid,
1378                                            why, (exit_code << 8) | 0x7f,
1379                                            infop, ru);
1380         }
1381
1382         write_lock_irq(&tasklist_lock);
1383
1384         /*
1385          * This uses xchg to be atomic with the thread resuming and setting
1386          * it.  It must also be done with the write lock held to prevent a
1387          * race with the EXIT_ZOMBIE case.
1388          */
1389         exit_code = xchg(&p->exit_code, 0);
1390         if (unlikely(p->exit_state)) {
1391                 /*
1392                  * The task resumed and then died.  Let the next iteration
1393                  * catch it in EXIT_ZOMBIE.  Note that exit_code might
1394                  * already be zero here if it resumed and did _exit(0).
1395                  * The task itself is dead and won't touch exit_code again;
1396                  * other processors in this function are locked out.
1397                  */
1398                 p->exit_code = exit_code;
1399                 exit_code = 0;
1400         }
1401         if (unlikely(exit_code == 0)) {
1402                 /*
1403                  * Another thread in this function got to it first, or it
1404                  * resumed, or it resumed and then died.
1405                  */
1406                 write_unlock_irq(&tasklist_lock);
1407 bail_ref:
1408                 put_task_struct(p);
1409                 /*
1410                  * We are returning to the wait loop without having successfully
1411                  * removed the process and having released the lock. We cannot
1412                  * continue, since the "p" task pointer is potentially stale.
1413                  *
1414                  * Return -EAGAIN, and do_wait() will restart the loop from the
1415                  * beginning. Do _not_ re-acquire the lock.
1416                  */
1417                 return -EAGAIN;
1418         }
1419
1420         /* move to end of parent's list to avoid starvation */
1421         remove_parent(p);
1422         add_parent(p);
1423
1424         write_unlock_irq(&tasklist_lock);
1425
1426         retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1427         if (!retval && stat_addr)
1428                 retval = put_user((exit_code << 8) | 0x7f, stat_addr);
1429         if (!retval && infop)
1430                 retval = put_user(SIGCHLD, &infop->si_signo);
1431         if (!retval && infop)
1432                 retval = put_user(0, &infop->si_errno);
1433         if (!retval && infop)
1434                 retval = put_user((short)((p->ptrace & PT_PTRACED)
1435                                           ? CLD_TRAPPED : CLD_STOPPED),
1436                                   &infop->si_code);
1437         if (!retval && infop)
1438                 retval = put_user(exit_code, &infop->si_status);
1439         if (!retval && infop)
1440                 retval = put_user(p->pid, &infop->si_pid);
1441         if (!retval && infop)
1442                 retval = put_user(p->uid, &infop->si_uid);
1443         if (!retval)
1444                 retval = p->pid;
1445         put_task_struct(p);
1446
1447         BUG_ON(!retval);
1448         return retval;
1449 }
1450
1451 /*
1452  * Handle do_wait work for one task in a live, non-stopped state.
1453  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1454  * the lock and this task is uninteresting.  If we return nonzero, we have
1455  * released the lock and the system call should return.
1456  */
1457 static int wait_task_continued(struct task_struct *p, int noreap,
1458                                struct siginfo __user *infop,
1459                                int __user *stat_addr, struct rusage __user *ru)
1460 {
1461         int retval;
1462         pid_t pid;
1463         uid_t uid;
1464
1465         if (unlikely(!p->signal))
1466                 return 0;
1467
1468         if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1469                 return 0;
1470
1471         spin_lock_irq(&p->sighand->siglock);
1472         /* Re-check with the lock held.  */
1473         if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1474                 spin_unlock_irq(&p->sighand->siglock);
1475                 return 0;
1476         }
1477         if (!noreap)
1478                 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1479         spin_unlock_irq(&p->sighand->siglock);
1480
1481         pid = p->pid;
1482         uid = p->uid;
1483         get_task_struct(p);
1484         read_unlock(&tasklist_lock);
1485
1486         if (!infop) {
1487                 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1488                 put_task_struct(p);
1489                 if (!retval && stat_addr)
1490                         retval = put_user(0xffff, stat_addr);
1491                 if (!retval)
1492                         retval = pid;	/* reference on p was dropped above */
1493         } else {
1494                 retval = wait_noreap_copyout(p, pid, uid,
1495                                              CLD_CONTINUED, SIGCONT,
1496                                              infop, ru);
1497                 BUG_ON(retval == 0);
1498         }
1499
1500         return retval;
1501 }
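/*
 * Illustration (not part of this file): the 0xffff status written just
 * above is the "continued" encoding, which user space recognizes with
 * WIFCONTINUED() when it asked for WCONTINUED.  A minimal user-space
 * sketch; report_continue() and child_pid are hypothetical:
 */
#if 0	/* user-space sketch, for exposition only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

static void report_continue(pid_t child_pid)
{
        int status;

        if (waitpid(child_pid, &status, WCONTINUED) == child_pid &&
            WIFCONTINUED(status))               /* status == 0xffff */
                printf("%d resumed by SIGCONT\n", (int)child_pid);
}
#endif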
1502
1503
1504 static inline int my_ptrace_child(struct task_struct *p)
1505 {
1506         if (!(p->ptrace & PT_PTRACED))
1507                 return 0;
1508         if (!(p->ptrace & PT_ATTACHED))
1509                 return 1;
1510         /*
1511          * This child was PTRACE_ATTACH'd.  We should be seeing it only if
1512          * we are the attacher.  If we are the real parent, this is a race
1513          * inside ptrace_attach.  It is waiting for the tasklist_lock,
1514          * which we have to take in order to change the parent links, but
1515          * it has already set the flags in p->ptrace.
1516          */
1517         return (p->parent != p->real_parent);
1518 }
1519
1520 static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
1521                     int __user *stat_addr, struct rusage __user *ru)
1522 {
1523         DECLARE_WAITQUEUE(wait, current);
1524         struct task_struct *tsk;
1525         int flag, retval;
1526         int allowed, denied;
1527
1528         add_wait_queue(&current->signal->wait_chldexit, &wait);
1529 repeat:
1530         /*
1531          * We will set this flag if we see any child that might later
1532          * match our criteria, even if we are not able to reap it yet.
1533          */
1534         flag = 0;
1535         allowed = denied = 0;
1536         current->state = TASK_INTERRUPTIBLE;
1537         read_lock(&tasklist_lock);
1538         tsk = current;
1539         do {
1540                 struct task_struct *p;
1541                 struct list_head *_p;
1542                 int ret;
1543
1544                 list_for_each(_p, &tsk->children) {
1545                         p = list_entry(_p, struct task_struct, sibling);
1546
1547                         ret = eligible_child(pid, options, p);
1548                         if (!ret)
1549                                 continue;
1550
1551                         if (unlikely(ret < 0)) {
1552                                 denied = ret;
1553                                 continue;
1554                         }
1555                         allowed = 1;
1556
1557                         switch (p->state) {
1558                         case TASK_TRACED:
1559                                 /*
1560                                  * When we hit the race with PTRACE_ATTACH,
1561                                  * we will not report this child.  But the
1562                                  * race means it has not yet been moved to
1563                                  * our ptrace_children list, so we need to
1564                                  * set the flag here to avoid a spurious ECHILD
1565                                  * when the race happens with the only child.
1566                                  */
1567                                 flag = 1;
1568                                 if (!my_ptrace_child(p))
1569                                         continue;
1570                                 /*FALLTHROUGH*/
1571                         case TASK_STOPPED:
1572                                 /*
1573                                  * It's stopped now, so it might later
1574                                  * continue, exit, or stop again.
1575                                  */
1576                                 flag = 1;
1577                                 if (!(options & WUNTRACED) &&
1578                                     !my_ptrace_child(p))
1579                                         continue;
1580                                 retval = wait_task_stopped(p, ret == 2,
1581                                                            (options & WNOWAIT),
1582                                                            infop,
1583                                                            stat_addr, ru);
1584                                 if (retval == -EAGAIN)
1585                                         goto repeat;
1586                                 if (retval != 0) /* He released the lock.  */
1587                                         goto end;
1588                                 break;
1589                         default:
1590                         /* case EXIT_DEAD: */
1591                                 if (p->exit_state == EXIT_DEAD)
1592                                         continue;
1593                         /* case EXIT_ZOMBIE: */
1594                                 if (p->exit_state == EXIT_ZOMBIE) {
1595                                         /*
1596                                          * Eligible but we cannot release
1597                                          * it yet:
1598                                          */
1599                                         if (ret == 2)
1600                                                 goto check_continued;
1601                                         if (unlikely(!(options & WEXITED)))
1602                                                 continue;
1603                                         retval = wait_task_zombie(
1604                                                 p, (options & WNOWAIT),
1605                                                 infop, stat_addr, ru);
1606                                         /* He released the lock.  */
1607                                         if (retval != 0)
1608                                                 goto end;
1609                                         break;
1610                                 }
1611 check_continued:
1612                                 /*
1613                                  * It's running now, so it might later
1614                                  * exit, stop, or stop and then continue.
1615                                  */
1616                                 flag = 1;
1617                                 if (likely(!(options & WCONTINUED)))
1618                                         continue;
1619                                 retval = wait_task_continued(
1620                                         p, (options & WNOWAIT),
1621                                         infop, stat_addr, ru);
1622                                 if (retval != 0) /* He released the lock.  */
1623                                         goto end;
1624                                 break;
1625                         }
1626                 }
1627                 if (!flag) {
1628                         list_for_each(_p, &tsk->ptrace_children) {
1629                                 p = list_entry(_p, struct task_struct,
1630                                                 ptrace_list);
1631                                 if (!eligible_child(pid, options, p))
1632                                         continue;
1633                                 flag = 1;
1634                                 break;
1635                         }
1636                 }
1637                 if (options & __WNOTHREAD)
1638                         break;
1639                 tsk = next_thread(tsk);
1640                 BUG_ON(tsk->signal != current->signal);
1641         } while (tsk != current);
1642
1643         read_unlock(&tasklist_lock);
1644         if (flag) {
1645                 retval = 0;
1646                 if (options & WNOHANG)
1647                         goto end;
1648                 retval = -ERESTARTSYS;
1649                 if (signal_pending(current))
1650                         goto end;
1651                 schedule();
1652                 goto repeat;
1653         }
1654         retval = -ECHILD;
1655         if (unlikely(denied) && !allowed)
1656                 retval = denied;
1657 end:
1658         current->state = TASK_RUNNING;
1659         remove_wait_queue(&current->signal->wait_chldexit, &wait);
1660         if (infop) {
1661                 if (retval > 0)
1662                         retval = 0;
1663                 else {
1664                         /*
1665                          * For a WNOHANG return, clear out all the fields
1666                          * we would set so the user can easily tell the
1667                          * difference.
1668                          */
1669                         if (!retval)
1670                                 retval = put_user(0, &infop->si_signo);
1671                         if (!retval)
1672                                 retval = put_user(0, &infop->si_errno);
1673                         if (!retval)
1674                                 retval = put_user(0, &infop->si_code);
1675                         if (!retval)
1676                                 retval = put_user(0, &infop->si_pid);
1677                         if (!retval)
1678                                 retval = put_user(0, &infop->si_uid);
1679                         if (!retval)
1680                                 retval = put_user(0, &infop->si_status);
1681                 }
1682         }
1683         return retval;
1684 }
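/*
 * Illustration (not part of this file): user space commonly relies on the
 * -ECHILD result above to terminate a reap-everything loop, blocking in
 * do_wait() until the last child is gone.  A minimal user-space sketch;
 * reap_all_children() is a hypothetical helper name:
 */
#if 0	/* user-space sketch, for exposition only */
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>

static void reap_all_children(void)
{
        int status;

        /* wait() fails with ECHILD once no eligible children remain */
        while (wait(&status) > 0 || errno == EINTR)
                ;
}
#endif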
1685
1686 asmlinkage long sys_waitid(int which, pid_t pid,
1687                            struct siginfo __user *infop, int options,
1688                            struct rusage __user *ru)
1689 {
1690         long ret;
1691
1692         if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
1693                 return -EINVAL;
1694         if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1695                 return -EINVAL;
1696
1697         switch (which) {
1698         case P_ALL:
1699                 pid = -1;
1700                 break;
1701         case P_PID:
1702                 if (pid <= 0)
1703                         return -EINVAL;
1704                 break;
1705         case P_PGID:
1706                 if (pid <= 0)
1707                         return -EINVAL;
1708                 pid = -pid;
1709                 break;
1710         default:
1711                 return -EINVAL;
1712         }
1713
1714         ret = do_wait(pid, options, infop, NULL, ru);
1715
1716         /* avoid REGPARM breakage on x86: */
1717         prevent_tail_call(ret);
1718         return ret;
1719 }
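/*
 * Illustration (not part of this file): the zeroed siginfo fields that
 * do_wait() produces for a WNOHANG "nothing to report" return are exactly
 * what lets user space tell the two outcomes apart.  A minimal user-space
 * sketch; poll_children() is a hypothetical helper name:
 */
#if 0	/* user-space sketch, for exposition only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Returns 1 if a child changed state, 0 if nothing happened, -1 on error. */
static int poll_children(void)
{
        siginfo_t info;

        if (waitid(P_ALL, 0, &info, WEXITED | WSTOPPED | WNOHANG) < 0)
                return -1;
        if (info.si_pid == 0)           /* fields were cleared: no news */
                return 0;
        printf("child %d: si_code=%d, si_status=%d\n",
               (int)info.si_pid, info.si_code, info.si_status);
        return 1;
}
#endif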
1720
1721 asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
1722                           int options, struct rusage __user *ru)
1723 {
1724         long ret;
1725
1726         if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1727                         __WNOTHREAD|__WCLONE|__WALL))
1728                 return -EINVAL;
1729         ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);
1730
1731         /* avoid REGPARM breakage on x86: */
1732         prevent_tail_call(ret);
1733         return ret;
1734 }
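/*
 * Illustration (not part of this file): sys_wait4() is reachable from user
 * space as the BSD-style wait4(), which also reports resource usage via
 * the getrusage(..., RUSAGE_BOTH, ...) call in the reaping paths above.
 * A minimal user-space sketch; wait_and_report() is hypothetical:
 */
#if 0	/* user-space sketch, for exposition only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>

static void wait_and_report(pid_t child)
{
        int status;
        struct rusage ru;

        if (wait4(child, &status, 0, &ru) == child && WIFEXITED(status))
                printf("exit status %d, %ld.%06ld s user time\n",
                       WEXITSTATUS(status),
                       (long)ru.ru_utime.tv_sec,
                       (long)ru.ru_utime.tv_usec);
}
#endif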
1735
1736 #ifdef __ARCH_WANT_SYS_WAITPID
1737
1738 /*
1739  * sys_waitpid() remains for compatibility. waitpid() should be
1740  * implemented by calling sys_wait4() from libc.a.
1741  */
1742 asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
1743 {
1744         return sys_wait4(pid, stat_addr, options, NULL);
1745 }
1746
1747 #endif
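/*
 * Illustration (not part of this file): the libc-side implementation the
 * comment above asks for, waitpid() expressed as a call to wait4(), could
 * be as small as this sketch; my_waitpid() is a hypothetical name:
 */
#if 0	/* user-space sketch, for exposition only */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>

pid_t my_waitpid(pid_t pid, int *status, int options)
{
        /* waitpid() is just wait4() with no rusage reporting */
        return wait4(pid, status, options, (struct rusage *)0);
}
#endif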