/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;      /* Handle normal Linux uptimes. */
int nr_threads;                 /* The idle threads do not count.. */

int max_threads;                /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_online_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()    kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)  kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
        free_thread_info(tsk->thread_info);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
#endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
                        ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

        /*
         * The default maximum number of threads is set to a safe
         * value: the thread stacks can take up at most one eighth
         * of memory.
         */
        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

        /*
         * we need to allow at least 20 threads to boot a system
         */
        if (max_threads < 20)
                max_threads = 20;

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
}

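/*
 * Allocate a fresh task_struct + thread_info pair and initialize it
 * as a copy of @orig.  Returns NULL if either allocation fails.
 */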
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;
        struct thread_info *ti;

        prepare_to_copy(orig);

        tsk = alloc_task_struct();
        if (!tsk)
                return NULL;

        ti = alloc_thread_info(tsk);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }

        *tsk = *orig;
        tsk->thread_info = ti;
        setup_thread_stack(tsk, orig);

        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage, 2);
        atomic_set(&tsk->fs_excl, 0);
        tsk->btrace_seq = 0;
        tsk->splice_pipe = NULL;
        return tsk;
}

#ifdef CONFIG_MMU
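/*
 * Duplicate the parent's vma list into the new mm.  Every vma except
 * VM_DONTCOPY ones is copied, linked into the list, the prio tree and
 * the rbtree, and its page table entries are copied with
 * copy_page_range().  Takes both mmap_sems (the new one nested).
 */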
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        struct mempolicy *pol;

        down_write(&oldmm->mmap_sem);
        flush_cache_mm(oldmm);
        /*
         * Not linked in yet - no deadlock potential:
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
        mm->free_area_cache = oldmm->mmap_base;
        mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpus_clear(mm->cpu_vm_mask);
        mm->mm_rb = RB_ROOT;
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;

        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        long pages = vma_pages(mpnt);
                        mm->total_vm -= pages;
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                                                -pages);
                        continue;
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory(len))
                                goto fail_nomem;
                        charge = len;
                }
                tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
                pol = mpol_copy(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_dentry->d_inode;
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);

                        /* insert tmp into the share list, just after mpnt */
                        spin_lock(&file->f_mapping->i_mmap_lock);
                        tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(file->f_mapping);
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(file->f_mapping);
                        spin_unlock(&file->f_mapping->i_mmap_lock);
                }

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        retval = 0;
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        return retval;
fail_nomem_policy:
        kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
        pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)     (0)
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

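/*
 * Initialize the bare-bones state of a freshly allocated mm_struct
 * and allocate its page directory.  Returns the mm on success, or
 * NULL (after freeing the mm) if the pgd allocation fails.
 */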
static struct mm_struct * mm_init(struct mm_struct * mm)
{
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_waiters = 0;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;

        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }
        free_mm(mm);
        return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
        struct mm_struct * mm;

        mm = allocate_mm();
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm);
        }
        return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
                mmdrop(mm);
        }
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if the mm is only
 * transiently borrowed (PF_BORROWED_MM is set, meaning a kernel
 * workthread has adopted a user mm with use_mm to do its AIO).
 * Otherwise bumps the use count and returns a reference to the mm;
 * the user must release it via mmput() after use.  Typically used
 * by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_BORROWED_MM)
                        mm = NULL;
                else
                        atomic_inc(&mm->mm_users);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        struct completion *vfork_done = tsk->vfork_done;

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /* notify parent sleeping on vfork() */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }
        if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
                u32 __user * tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;

                /*
                 * We don't check the error code - if userspace has
                 * not set up a proper pointer then tough luck.
                 */
                put_user(0, tidptr);
                sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm = current->mm;
        int err;

        if (!oldmm)
                return NULL;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        if (!mm_init(mm))
                goto fail_nomem;

        if (init_new_context(tsk, mm))
                goto fail_nocontext;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        return mm;

free_pt:
        mmput(mm);

fail_nomem:
        return NULL;

fail_nocontext:
        /*
         * If init_new_context() failed, we cannot use mmput() to free the mm
         * because it calls destroy_context()
         */
        mm_free_pgd(mm);
        free_mm(mm);
        return NULL;
}

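/*
 * Set up the child's mm at fork: kernel threads (no current->mm) get
 * none, CLONE_VM shares the parent's mm, and everything else gets a
 * full copy via dup_mm().
 */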
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
        struct mm_struct * mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that..
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;

good_mm:
        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                atomic_set(&fs->count, 1);
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->rootmnt = mntget(old->rootmnt);
                fs->root = dget(old->root);
                fs->pwdmnt = mntget(old->pwdmnt);
                fs->pwd = dget(old->pwd);
                if (old->altroot) {
                        fs->altrootmnt = mntget(old->altrootmnt);
                        fs->altroot = dget(old->altroot);
                } else {
                        fs->altrootmnt = NULL;
                        fs->altroot = NULL;
                }
                read_unlock(&old->lock);
        }
        return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
                return 0;
        }
        tsk->fs = __copy_fs_struct(current->fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}

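/*
 * Scan the open-fd bitmap backwards for the highest long word with a
 * bit set, and return the number of fds it covers, i.e. the open fd
 * count rounded up to a whole long word.
 */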
static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fdset;
        int i;

        /* Find the last open fd */
        for (i = size/(8*sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i+1) * 8 * sizeof(long);
        return i;
}

static struct files_struct *alloc_files(void)
{
        struct files_struct *newf;
        struct fdtable *fdt;

        newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        fdt = &newf->fdtab;
        fdt->max_fds = NR_OPEN_DEFAULT;
        fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
        fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->free_files = NULL;
        fdt->next = NULL;
        rcu_assign_pointer(newf->fdt, fdt);
out:
        return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i, expand;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        new_fdt = files_fdtable(newf);
        size = old_fdt->max_fdset;
        open_files = count_open_files(old_fdt);
        expand = 0;

        /*
         * Check whether we need to allocate a larger fd array or fd set.
         * Note: we're not a clone task, so the open count won't change.
         */
        if (open_files > new_fdt->max_fdset) {
                new_fdt->max_fdset = 0;
                expand = 1;
        }
        if (open_files > new_fdt->max_fds) {
                new_fdt->max_fds = 0;
                expand = 1;
        }

        /* if the old fdset gets grown now, we'll only copy up to "size" fds */
        if (expand) {
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
                *errorp = expand_files(newf, open_files-1);
                spin_unlock(&newf->file_lock);
                if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
                 * Reacquire the oldf lock and a pointer to its fd table:
                 * it may have grown a new, bigger fd table in the meantime,
                 * so we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
        memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long word aligned thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fdset > open_files) {
                int left = (new_fdt->max_fdset-open_files)/8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

out:
        return newf;

out_release:
        free_fdset(new_fdt->close_on_exec, new_fdt->max_fdset);
        free_fdset(new_fdt->open_fds, new_fdt->max_fdset);
        free_fd_array(new_fdt->fd, new_fdt->max_fds);
        kmem_cache_free(files_cachep, newf);
        return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        /*
         * Note: we may be using current for both targets (See exec.c)
         * This works because we cache current->files (old) as oldf. Don't
         * break this.
         */
        tsk->files = NULL;
        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}

/*
 *      Helper to unshare the files of the current task.
 *      We don't want to expose copy_files internals to
 *      the exec layer of the kernel.
 */

int unshare_files(void)
{
        struct files_struct *files = current->files;
        int rc;

        BUG_ON(!files);

        /* This can race, but the race only causes us to make a copy
           when we don't need to, and then drop that copy. */
        if (atomic_read(&files->count) == 1) {
                atomic_inc(&files->count);
                return 0;
        }
        rc = copy_files(0, current);
        if (rc)
                current->files = files;
        return rc;
}

EXPORT_SYMBOL(unshare_files);

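/*
 * CLONE_SIGHAND (and CLONE_THREAD, which implies it) shares the
 * parent's signal handler table; otherwise allocate a new
 * sighand_struct and copy the parent's handlers into it.
 */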
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
                atomic_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}

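/*
 * CLONE_THREAD children share the parent's signal_struct; everyone
 * else gets a freshly initialized one, with the resource limits
 * copied under the group leader's task lock.
 */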
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
        struct signal_struct *sig;
        int ret;

        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                taskstats_tgid_alloc(current->signal);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        sig->flags = 0;
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
        sig->curr_target = NULL;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);

        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
        sig->tsk = tsk;

        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;

        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = 0;

        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->sched_time = 0;
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
        taskstats_tgid_init(sig);

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                /*
                 * New sole thread in the process gets an expiry time
                 * of the whole CPU time limit.
                 */
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
        acct_init_pacct(&sig->pacct);

        return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
        exit_thread_group_keys(sig);
        taskstats_tgid_free(sig);
        kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;

        atomic_dec(&sig->live);

        if (atomic_dec_and_test(&sig->count))
                __cleanup_signal(sig);
}

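/*
 * Set up the flags word of a freshly forked child: it has not used
 * superuser privileges or exec'd yet, and keeps its ptrace state only
 * if CLONE_PTRACE was requested.
 */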
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
        unsigned long new_flags = p->flags;

        new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
        new_flags |= PF_FORKNOEXEC;
        if (!(clone_flags & CLONE_PTRACE))
                p->ptrace = 0;
        p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
        current->clear_child_tid = tidptr;

        return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
        spin_lock_init(&p->pi_lock);
        plist_head_init(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *parent_tidptr,
                                        int __user *child_tidptr,
                                        int pid)
{
        int retval;
        struct task_struct *p = NULL;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

#ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                                p->user != &root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
        p->pid = pid;
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup;

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->utime = cputime_zero;
        p->stime = cputime_zero;
        p->sched_time = 0;
        p->rchar = 0;           /* I/O counter: bytes read */
        p->wchar = 0;           /* I/O counter: bytes written */
        p->syscr = 0;           /* I/O counter: read syscalls */
        p->syscw = 0;           /* I/O counter: write syscalls */
        acct_clear_integrals(p);

        p->it_virt_expires = cputime_zero;
        p->it_prof_expires = cputime_zero;
        p->it_sched_expires = 0;
        INIT_LIST_HEAD(&p->cpu_timers[0]);
        INIT_LIST_HEAD(&p->cpu_timers[1]);
        INIT_LIST_HEAD(&p->cpu_timers[2]);

        p->lock_depth = -1;             /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->security = NULL;
        p->io_context = NULL;
        p->io_wait = NULL;
        p->audit_context = NULL;
        cpuset_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cpuset;
        }
        mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
        p->hardirqs_enabled = 0;
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
        p->lockdep_depth = 0; /* no locks held yet */
        p->curr_chain_key = 0;
        p->lockdep_recursion = 0;
#endif

        rt_mutex_init_task(p);

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
#endif

        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespace(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespace;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;

        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                p->sas_ss_sp = p->sas_ss_size = 0;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

        /* Our parent execution domain becomes the current domain.
           These must match for thread signalling to apply. */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;
        p->exit_state = 0;

        /*
         * Ok, make it visible to the rest of the system.
         * We don't wake it up yet.
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);

        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
         *
         * The cpus_allowed mask of the parent may have changed after it was
         * copied first time - so re-copy it here, then check the child's CPU
         * to ensure it is on a valid CPU (and if not, just force it back to
         * the parent's CPU). This avoids a lot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        spin_lock(&current->sighand->siglock);

        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
         * fork. Restart if a signal comes in before we add the new process to
         * its process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
         */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_cleanup_namespace;
        }

        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
                    !cputime_eq(current->signal->it_prof_expires,
                                cputime_zero) ||
                    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
                    !list_empty(&current->signal->cpu_timers[0]) ||
                    !list_empty(&current->signal->cpu_timers[1]) ||
                    !list_empty(&current->signal->cpu_timers[2])) {
                        /*
                         * Have child wake up on its first tick to check
                         * for process CPU timers.
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
        }

        /*
         * inherit ioprio
         */
        p->ioprio = current->ioprio;

        if (likely(p->pid)) {
                add_parent(p);
                if (unlikely(p->ptrace & PT_PTRACED))
                        __ptrace_link(p, current->parent);

                if (thread_group_leader(p)) {
                        p->signal->tty = current->signal->tty;
                        p->signal->pgrp = process_group(current);
                        p->signal->session = current->signal->session;
                        attach_pid(p, PIDTYPE_PGID, process_group(p));
                        attach_pid(p, PIDTYPE_SID, p->signal->session);

                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
                attach_pid(p, PIDTYPE_PID, p->pid);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        return p;

bad_fork_cleanup_namespace:
        exit_namespace(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
bad_fork_cleanup_signal:
        cleanup_signal(p);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
        cpuset_exit(p);
bad_fork_cleanup:
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}

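/*
 * Weak default: hand the idle task an all-zero register set.
 * Architectures can override this with their own idle_regs().
 */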
struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
}

struct task_struct * __devinit fork_idle(int cpu)
{
        struct task_struct *task;
        struct pt_regs regs;

        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
        if (!task)
                return ERR_PTR(-ENOMEM);
        init_idle(task, cpu);

        return task;
}

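/*
 * Decide which ptrace event (if any) the tracer asked to be notified
 * about for this clone: vfork, clone or plain fork.
 */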
static inline int fork_traceflag(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;
        else if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              struct pt_regs *regs,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        struct pid *pid = alloc_pid();
        long nr;

        if (!pid)
                return -EAGAIN;
        nr = pid->nr;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        p->state = TASK_STOPPED;

                if (unlikely(trace)) {
                        current->ptrace_message = nr;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        wait_for_completion(&vfork);
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
                                current->ptrace_message = nr;
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                        }
                }
        } else {
                free_pid(pid);
                nr = PTR_ERR(p);
        }
        return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

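/*
 * Slab constructor for sighand_struct: initialize the siglock only on
 * a real construction pass (not on SLAB_CTOR_VERIFY).  The cache is
 * created SLAB_DESTROY_BY_RCU below, so objects may be reused before
 * an RCU grace period has elapsed.
 */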
static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
        struct sighand_struct *sighand = data;

        if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
                                        SLAB_CTOR_CONSTRUCTOR)
                spin_lock_init(&sighand->siglock);
}

void __init proc_caches_init(void)
{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
                        sighand_ctor, NULL);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                        sizeof(struct vm_area_struct), 0,
                        SLAB_PANIC, NULL, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
        /*
         * If unsharing a thread from a thread group, must also
         * unshare vm.
         */
        if (*flags_ptr & CLONE_THREAD)
                *flags_ptr |= CLONE_VM;

        /*
         * If unsharing vm, must also unshare signal handlers.
         */
        if (*flags_ptr & CLONE_VM)
                *flags_ptr |= CLONE_SIGHAND;

        /*
         * If unsharing signal handlers and the task was created
         * using CLONE_THREAD, then the thread must be unshared too.
         */
        if ((*flags_ptr & CLONE_SIGHAND) &&
            (atomic_read(&current->signal->count) > 1))
                *flags_ptr |= CLONE_THREAD;

        /*
         * If unsharing namespace, must also unshare filesystem information.
         */
        if (*flags_ptr & CLONE_NEWNS)
                *flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
        if (unshare_flags & CLONE_THREAD)
                return -EINVAL;

        return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
        struct fs_struct *fs = current->fs;

        if ((unshare_flags & CLONE_FS) &&
            (fs && atomic_read(&fs->count) > 1)) {
                *new_fsp = __copy_fs_struct(current->fs);
                if (!*new_fsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unshare the namespace structure if it is being shared
 */
static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
{
        struct namespace *ns = current->namespace;

        if ((unshare_flags & CLONE_NEWNS) &&
            (ns && atomic_read(&ns->count) > 1)) {
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;

                *new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
                if (!*new_nsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
 * supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
        struct sighand_struct *sigh = current->sighand;

        if ((unshare_flags & CLONE_SIGHAND) &&
            (sigh && atomic_read(&sigh->count) > 1))
                return -EINVAL;
        else
                return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
        struct mm_struct *mm = current->mm;

        if ((unshare_flags & CLONE_VM) &&
            (mm && atomic_read(&mm->mm_users) > 1)) {
                return -EINVAL;
        }

        return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
        struct files_struct *fd = current->files;
        int error = 0;

        if ((unshare_flags & CLONE_FILES) &&
            (fd && atomic_read(&fd->count) > 1)) {
                *new_fdp = dup_fd(fd, &error);
                if (!*new_fdp)
                        return error;
        }

        return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
        if (unshare_flags & CLONE_SYSVSEM)
                return -EINVAL;

        return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
        int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
        struct namespace *ns, *new_ns = NULL;
        struct sighand_struct *sigh, *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct sem_undo_list *new_ulist = NULL;

        check_unshare_flags(&unshare_flags);

        /* Return -EINVAL for all unsupported flags */
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
                goto bad_unshare_out;

        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
                goto bad_unshare_cleanup_thread;
        if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
                goto bad_unshare_cleanup_fs;
        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
                goto bad_unshare_cleanup_ns;
        if ((err = unshare_vm(unshare_flags, &new_mm)))
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
        if ((err = unshare_semundo(unshare_flags, &new_ulist)))
                goto bad_unshare_cleanup_fd;

        if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {

                task_lock(current);

                if (new_fs) {
                        fs = current->fs;
                        current->fs = new_fs;
                        new_fs = fs;
                }

                if (new_ns) {
                        ns = current->namespace;
                        current->namespace = new_ns;
                        new_ns = ns;
                }

                if (new_sigh) {
                        sigh = current->sighand;
                        rcu_assign_pointer(current->sighand, new_sigh);
                        new_sigh = sigh;
                }

                if (new_mm) {
                        mm = current->mm;
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }

                if (new_fd) {
                        fd = current->files;
                        current->files = new_fd;
                        new_fd = fd;
                }

                task_unlock(current);
        }

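/*
 * Both the success and the error paths fall through the labels below:
 * whatever is left in the new_* pointers - the freshly allocated
 * copies on error, or the old structures swapped out of current on
 * success - is released here.
 */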
bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);

bad_unshare_cleanup_vm:
        if (new_mm)
                mmput(new_mm);

bad_unshare_cleanup_sigh:
        if (new_sigh)
                if (atomic_dec_and_test(&new_sigh->count))
                        kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_ns:
        if (new_ns)
                put_namespace(new_ns);

bad_unshare_cleanup_fs:
        if (new_fs)
                put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
        return err;
}