4 * Copyright (C) 1991, 1992 Linus Torvalds
8 * #!-checking implemented by tytso.
11 * Demand-loading implemented 01.12.91 - no need to read anything but
12 * the header into memory. The inode of the executable is put into
13 * "current->executable", and page faults do the actual loading. Clean.
15 * Once more I can proudly say that linux stood up to being changed: it
16 * was less than 2 hours work to get demand-loading completely implemented.
18 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
19 * current->executable is only used by the procfs. This allows a dispatch
20 * table to check for several different types of binary formats. We keep
21 * trying until we recognize the file or we run out of supported binary
22 * formats.
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/mman.h>
28 #include <linux/a.out.h>
29 #include <linux/stat.h>
30 #include <linux/fcntl.h>
31 #include <linux/smp_lock.h>
32 #include <linux/init.h>
33 #include <linux/pagemap.h>
34 #include <linux/highmem.h>
35 #include <linux/spinlock.h>
36 #include <linux/key.h>
37 #include <linux/personality.h>
38 #include <linux/binfmts.h>
39 #include <linux/swap.h>
40 #include <linux/utsname.h>
41 #include <linux/pid_namespace.h>
42 #include <linux/module.h>
43 #include <linux/namei.h>
44 #include <linux/proc_fs.h>
45 #include <linux/ptrace.h>
46 #include <linux/mount.h>
47 #include <linux/security.h>
48 #include <linux/syscalls.h>
49 #include <linux/rmap.h>
50 #include <linux/tsacct_kern.h>
51 #include <linux/cn_proc.h>
52 #include <linux/audit.h>
53 #include <linux/signalfd.h>
55 #include <asm/uaccess.h>
56 #include <asm/mmu_context.h>
60 #include <linux/kmod.h>
64 char core_pattern[CORENAME_MAX_SIZE] = "core";
65 int suid_dumpable = 0;
67 EXPORT_SYMBOL(suid_dumpable);
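/*
 * suid_dumpable is the "fs.suid_dumpable" sysctl.  Roughly: 0 means that
 * processes which changed credentials are not dumped, 1 means they are
 * dumped like any other process, and 2 means the dump is written readable
 * by root only (see set_dumpable() and do_coredump() below).
 */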
68 /* The maximal length of core_pattern is also specified in sysctl.c */
70 static struct linux_binfmt *formats;
71 static DEFINE_RWLOCK(binfmt_lock);
73 int register_binfmt(struct linux_binfmt * fmt)
75 struct linux_binfmt ** tmp = &formats;
81 write_lock(&binfmt_lock);
84 write_unlock(&binfmt_lock);
91 write_unlock(&binfmt_lock);
95 EXPORT_SYMBOL(register_binfmt);
97 int unregister_binfmt(struct linux_binfmt * fmt)
99 struct linux_binfmt ** tmp = &formats;
101 write_lock(&binfmt_lock);
106 write_unlock(&binfmt_lock);
111 write_unlock(&binfmt_lock);
115 EXPORT_SYMBOL(unregister_binfmt);
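/*
 * A binary-format handler (binfmt_elf, binfmt_script, ...) registers itself
 * from its module init code.  A minimal sketch, assuming a hypothetical
 * callback example_load_binary() that is not part of this file:
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = example_load_binary,
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 *
 *	static void __exit exit_example_binfmt(void)
 *	{
 *		unregister_binfmt(&example_format);
 *	}
 *
 * A load_binary() callback returns -ENOEXEC to let the next handler try;
 * see search_binary_handler() below.
 */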
117 static inline void put_binfmt(struct linux_binfmt * fmt)
119 module_put(fmt->module);
123 * Note that a shared library must be both readable and executable due to
124 * security reasons.
126 * Also note that we take the address to load from the file itself.
128 asmlinkage long sys_uselib(const char __user * library)
134 error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
139 if (nd.mnt->mnt_flags & MNT_NOEXEC)
142 if (!S_ISREG(nd.dentry->d_inode->i_mode))
145 error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
149 file = nameidata_to_filp(&nd, O_RDONLY);
150 error = PTR_ERR(file);
156 struct linux_binfmt * fmt;
158 read_lock(&binfmt_lock);
159 for (fmt = formats ; fmt ; fmt = fmt->next) {
160 if (!fmt->load_shlib)
162 if (!try_module_get(fmt->module))
164 read_unlock(&binfmt_lock);
165 error = fmt->load_shlib(file);
166 read_lock(&binfmt_lock);
168 if (error != -ENOEXEC)
171 read_unlock(&binfmt_lock);
177 release_open_intent(&nd);
184 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
190 #ifdef CONFIG_STACK_GROWSUP
192 ret = expand_stack_downwards(bprm->vma, pos);
197 ret = get_user_pages(current, bprm->mm, pos,
198 1, write, 1, &page, NULL);
203 struct rlimit *rlim = current->signal->rlim;
204 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
207 * Limit the argv+env strings to 1/4 of the stack size. This ensures that:
209 * - the remaining binfmt code will not run out of stack space,
210 * - the program will have a reasonable amount of stack left to work from.
213 if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
222 static void put_arg_page(struct page *page)
227 static void free_arg_page(struct linux_binprm *bprm, int i)
231 static void free_arg_pages(struct linux_binprm *bprm)
235 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
238 flush_cache_page(bprm->vma, pos, page_to_pfn(page));
241 static int __bprm_mm_init(struct linux_binprm *bprm)
244 struct vm_area_struct *vma = NULL;
245 struct mm_struct *mm = bprm->mm;
247 bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
251 down_write(&mm->mmap_sem);
255 * Place the stack at the largest stack address the architecture
256 * supports. Later, we'll move this to an appropriate place. We don't
257 * use STACK_TOP because that can depend on attributes which aren't
258 * configured yet.
260 vma->vm_end = STACK_TOP_MAX;
261 vma->vm_start = vma->vm_end - PAGE_SIZE;
263 vma->vm_flags = VM_STACK_FLAGS;
264 vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
265 err = insert_vm_struct(mm, vma);
267 up_write(&mm->mmap_sem);
271 mm->stack_vm = mm->total_vm = 1;
272 up_write(&mm->mmap_sem);
274 bprm->p = vma->vm_end - sizeof(void *);
281 kmem_cache_free(vm_area_cachep, vma);
287 static bool valid_arg_len(struct linux_binprm *bprm, long len)
289 return len <= MAX_ARG_STRLEN;
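/*
 * MAX_ARG_STRLEN (PAGE_SIZE * 32, i.e. 128KB with 4KB pages) caps a single
 * argument or environment string; the total size of the strings is bounded
 * by the stack rlimit check in get_arg_page() above, not by a fixed number
 * of pages.
 */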
294 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
299 page = bprm->page[pos / PAGE_SIZE];
300 if (!page && write) {
301 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
304 bprm->page[pos / PAGE_SIZE] = page;
310 static void put_arg_page(struct page *page)
314 static void free_arg_page(struct linux_binprm *bprm, int i)
317 __free_page(bprm->page[i]);
318 bprm->page[i] = NULL;
322 static void free_arg_pages(struct linux_binprm *bprm)
326 for (i = 0; i < MAX_ARG_PAGES; i++)
327 free_arg_page(bprm, i);
330 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
335 static int __bprm_mm_init(struct linux_binprm *bprm)
337 bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
341 static bool valid_arg_len(struct linux_binprm *bprm, long len)
343 return len <= bprm->p;
346 #endif /* CONFIG_MMU */
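/*
 * To summarize the two variants above: with an MMU the argument pages live
 * in a real stack VMA and get_arg_page() pins them on demand through
 * get_user_pages(); without an MMU they are ordinary kernel pages tracked
 * in bprm->page[] and capped at MAX_ARG_PAGES.  Either way, the rest of the
 * exec code only goes through get_arg_page()/put_arg_page() and friends.
 */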
349 * Create a new mm_struct and populate it with a temporary stack
350 * vm_area_struct. We don't have enough context at this point to set the stack
351 * flags, permissions, and offset, so we use temporary values. We'll update
352 * them later in setup_arg_pages().
354 int bprm_mm_init(struct linux_binprm *bprm)
357 struct mm_struct *mm = NULL;
359 bprm->mm = mm = mm_alloc();
364 err = init_new_context(current, mm);
368 err = __bprm_mm_init(bprm);
384 * count() counts the number of strings in array ARGV.
386 static int count(char __user * __user * argv, int max)
394 if (get_user(p, argv))
408 * 'copy_strings()' copies argument/environment strings from the old
409 * process's memory to the new process's stack. The call to get_user_pages()
410 * ensures the destination page is created and not swapped out.
412 static int copy_strings(int argc, char __user * __user * argv,
413 struct linux_binprm *bprm)
415 struct page *kmapped_page = NULL;
417 unsigned long kpos = 0;
425 if (get_user(str, argv+argc) ||
426 !(len = strnlen_user(str, MAX_ARG_STRLEN))) {
431 if (!valid_arg_len(bprm, len)) {
436 /* We're going to work our way backwards. */
442 int offset, bytes_to_copy;
444 offset = pos % PAGE_SIZE;
448 bytes_to_copy = offset;
449 if (bytes_to_copy > len)
452 offset -= bytes_to_copy;
453 pos -= bytes_to_copy;
454 str -= bytes_to_copy;
455 len -= bytes_to_copy;
457 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
460 page = get_arg_page(bprm, pos, 1);
467 flush_kernel_dcache_page(kmapped_page);
468 kunmap(kmapped_page);
469 put_arg_page(kmapped_page);
472 kaddr = kmap(kmapped_page);
473 kpos = pos & PAGE_MASK;
474 flush_arg_page(bprm, kpos, kmapped_page);
476 if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
485 flush_kernel_dcache_page(kmapped_page);
486 kunmap(kmapped_page);
487 put_arg_page(kmapped_page);
493 * Like copy_strings, but get argv and its values from kernel memory.
495 int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
498 mm_segment_t oldfs = get_fs();
500 r = copy_strings(argc, (char __user * __user *)argv, bprm);
504 EXPORT_SYMBOL(copy_strings_kernel);
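/*
 * A simplified usage sketch in the spirit of binfmt_script, which rewrites
 * the argument vector before re-running the lookup on the interpreter;
 * "i_name" is assumed to hold the interpreter path:
 *
 *	remove_arg_zero(bprm);
 *	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 *	if (retval < 0)
 *		return retval;
 *	bprm->argc++;
 *	retval = copy_strings_kernel(1, &i_name, bprm);
 *	if (retval < 0)
 *		return retval;
 *	bprm->argc++;
 *
 * Because copy_strings() works backwards, the string pushed last (i_name)
 * becomes the new argv[0].
 */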
509 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
510 * the binfmt code determines where the new stack should reside, we shift it to
511 * its final location. The process proceeds as follows:
513 * 1) Use shift to calculate the new vma endpoints.
514 * 2) Extend vma to cover both the old and new ranges. This ensures the
515 * arguments passed to subsequent functions are consistent.
516 * 3) Move vma's page tables to the new range.
517 * 4) Free up any cleared pgd range.
518 * 5) Shrink the vma to cover only the new range.
520 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
522 struct mm_struct *mm = vma->vm_mm;
523 unsigned long old_start = vma->vm_start;
524 unsigned long old_end = vma->vm_end;
525 unsigned long length = old_end - old_start;
526 unsigned long new_start = old_start - shift;
527 unsigned long new_end = old_end - shift;
528 struct mmu_gather *tlb;
530 BUG_ON(new_start > new_end);
533 * ensure there are no vmas between where we want to go
534 * and where we are.
536 if (vma != find_vma(mm, new_start))
540 * cover the whole range: [new_start, old_end)
542 vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
545 * move the page tables downwards; on failure we rely on
546 * process cleanup to remove whatever mess we made.
548 if (length != move_page_tables(vma, old_start,
549 vma, new_start, length))
553 tlb = tlb_gather_mmu(mm, 0);
554 if (new_end > old_start) {
556 * When the old and new regions overlap, clear from new_end.
558 free_pgd_range(&tlb, new_end, old_end, new_end,
559 vma->vm_next ? vma->vm_next->vm_start : 0);
562 * Otherwise, clean from old_start; this is done to not touch
563 * the address space in [new_end, old_start), since some architectures
564 * have constraints on va-space that make this illegal (IA64) -
565 * for the others it's just a little faster.
567 free_pgd_range(&tlb, old_start, old_end, new_end,
568 vma->vm_next ? vma->vm_next->vm_start : 0);
570 tlb_finish_mmu(tlb, new_end, old_end);
573 * shrink the vma to just the new range.
575 vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
580 #define EXTRA_STACK_VM_PAGES 20 /* random */
583 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
584 * the stack is optionally relocated, and some extra space is added.
586 int setup_arg_pages(struct linux_binprm *bprm,
587 unsigned long stack_top,
588 int executable_stack)
591 unsigned long stack_shift;
592 struct mm_struct *mm = current->mm;
593 struct vm_area_struct *vma = bprm->vma;
594 struct vm_area_struct *prev = NULL;
595 unsigned long vm_flags;
596 unsigned long stack_base;
598 #ifdef CONFIG_STACK_GROWSUP
599 /* Limit stack size to 1GB */
600 stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
601 if (stack_base > (1 << 30))
602 stack_base = 1 << 30;
604 /* Make sure we didn't let the argument array grow too large. */
605 if (vma->vm_end - vma->vm_start > stack_base)
608 stack_base = PAGE_ALIGN(stack_top - stack_base);
610 stack_shift = vma->vm_start - stack_base;
611 mm->arg_start = bprm->p - stack_shift;
612 bprm->p = vma->vm_end - stack_shift;
614 stack_top = arch_align_stack(stack_top);
615 stack_top = PAGE_ALIGN(stack_top);
616 stack_shift = vma->vm_end - stack_top;
618 bprm->p -= stack_shift;
619 mm->arg_start = bprm->p;
623 bprm->loader -= stack_shift;
624 bprm->exec -= stack_shift;
626 down_write(&mm->mmap_sem);
627 vm_flags = vma->vm_flags;
630 * Adjust stack execute permissions; explicitly enable for
631 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
632 * (arch default) otherwise.
634 if (unlikely(executable_stack == EXSTACK_ENABLE_X))
636 else if (executable_stack == EXSTACK_DISABLE_X)
637 vm_flags &= ~VM_EXEC;
638 vm_flags |= mm->def_flags;
640 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
646 /* Move stack pages down in memory. */
648 ret = shift_arg_pages(vma, stack_shift);
650 up_write(&mm->mmap_sem);
655 #ifdef CONFIG_STACK_GROWSUP
656 stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
658 stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
660 ret = expand_stack(vma, stack_base);
665 up_write(&mm->mmap_sem);
668 EXPORT_SYMBOL(setup_arg_pages);
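/*
 * Worked example for the grow-down path above, with illustrative numbers:
 * if the temporary stack VMA ends at STACK_TOP_MAX = 0xc0000000 and the
 * binfmt asks for stack_top = 0xbf800000, then stack_shift = 0x800000 and
 * shift_arg_pages() slides the argument VMA down by 8MB before
 * expand_stack() adds EXTRA_STACK_VM_PAGES of headroom.
 */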
670 #endif /* CONFIG_MMU */
672 struct file *open_exec(const char *name)
678 err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
682 struct inode *inode = nd.dentry->d_inode;
683 file = ERR_PTR(-EACCES);
684 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
685 S_ISREG(inode->i_mode)) {
686 int err = vfs_permission(&nd, MAY_EXEC);
689 file = nameidata_to_filp(&nd, O_RDONLY);
691 err = deny_write_access(file);
701 release_open_intent(&nd);
707 EXPORT_SYMBOL(open_exec);
709 int kernel_read(struct file *file, unsigned long offset,
710 char *addr, unsigned long count)
718 /* The cast to a user pointer is valid due to the set_fs() */
719 result = vfs_read(file, (void __user *)addr, count, &pos);
724 EXPORT_SYMBOL(kernel_read);
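/*
 * kernel_read() behaves like a positional read into a kernel buffer.  A
 * sketch of a typical caller, with names borrowed from binfmt_elf:
 *
 *	retval = kernel_read(bprm->file, elf_ex.e_phoff,
 *			     (char *)elf_phdata, size);
 *	if (retval != size && retval >= 0)
 *		retval = -EIO;
 *
 * i.e. a short read is an error when a header must be fully present.
 */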
726 static int exec_mmap(struct mm_struct *mm)
728 struct task_struct *tsk;
729 struct mm_struct * old_mm, *active_mm;
731 /* Notify parent that we're no longer interested in the old VM */
733 old_mm = current->mm;
734 mm_release(tsk, old_mm);
738 * Make sure that if there is a core dump in progress
739 * for the old mm, we get out and die instead of going
740 * through with the exec. We must hold mmap_sem around
741 * checking core_waiters and changing tsk->mm. The
742 * core-inducing thread will increment core_waiters for
743 * each thread whose ->mm == old_mm.
745 down_read(&old_mm->mmap_sem);
746 if (unlikely(old_mm->core_waiters)) {
747 up_read(&old_mm->mmap_sem);
752 active_mm = tsk->active_mm;
755 activate_mm(active_mm, mm);
757 arch_pick_mmap_layout(mm);
759 up_read(&old_mm->mmap_sem);
760 BUG_ON(active_mm != old_mm);
769 * This function makes sure the current process has its own signal table,
770 * so that flush_signal_handlers can later reset the handlers without
771 * disturbing other processes. (Other processes might share the signal
772 * table via the CLONE_SIGHAND option to clone().)
774 static int de_thread(struct task_struct *tsk)
776 struct signal_struct *sig = tsk->signal;
777 struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
778 spinlock_t *lock = &oldsighand->siglock;
779 struct task_struct *leader = NULL;
783 * Tell all the sighand listeners that this sighand has
784 * been detached. The signalfd_detach() function grabs the
785 * sighand lock if signal listeners are present on the sighand.
787 signalfd_detach(tsk);
790 * If we don't share sighandlers, then we aren't sharing anything
791 * and we can just re-use it all.
793 if (atomic_read(&oldsighand->count) <= 1) {
794 BUG_ON(atomic_read(&sig->count) != 1);
799 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
803 if (thread_group_empty(tsk))
804 goto no_thread_group;
807 * Kill all other threads in the thread group.
808 * We must hold tasklist_lock to call zap_other_threads.
810 read_lock(&tasklist_lock);
812 if (sig->flags & SIGNAL_GROUP_EXIT) {
814 * Another group action in progress, just
815 * return so that the signal is processed.
817 spin_unlock_irq(lock);
818 read_unlock(&tasklist_lock);
819 kmem_cache_free(sighand_cachep, newsighand);
824 * child_reaper ignores SIGKILL, change it now.
825 * Reparenting requires write_lock on tasklist_lock, so no
826 * reparenting can race with us while we hold the read_lock.
828 if (unlikely(tsk->group_leader == child_reaper(tsk)))
829 tsk->nsproxy->pid_ns->child_reaper = tsk;
831 zap_other_threads(tsk);
832 read_unlock(&tasklist_lock);
835 * Account for the thread group leader hanging around:
838 if (!thread_group_leader(tsk)) {
841 * The SIGALRM timer survives the exec, but needs to point
842 * at us as the new group leader now. We have a race with
843 * a timer firing now getting the old leader, so we need to
844 * synchronize with any firing (by calling del_timer_sync)
845 * before we can safely let the old group leader die.
848 spin_unlock_irq(lock);
849 if (hrtimer_cancel(&sig->real_timer))
850 hrtimer_restart(&sig->real_timer);
853 while (atomic_read(&sig->count) > count) {
854 sig->group_exit_task = tsk;
855 sig->notify_count = count;
856 __set_current_state(TASK_UNINTERRUPTIBLE);
857 spin_unlock_irq(lock);
861 sig->group_exit_task = NULL;
862 sig->notify_count = 0;
863 spin_unlock_irq(lock);
866 * At this point all other threads have exited; all we have to
867 * do is wait for the thread group leader to become inactive,
868 * and to assume its PID:
870 if (!thread_group_leader(tsk)) {
872 * Wait for the thread group leader to be a zombie.
873 * It should already be a zombie at this point, most of the time.
876 leader = tsk->group_leader;
877 while (leader->exit_state != EXIT_ZOMBIE)
881 * The only record we have of the real-time age of a
882 * process, regardless of execs it's done, is start_time.
883 * All the past CPU time is accumulated in signal_struct
884 * from sister threads now dead. But in this non-leader
885 * exec, nothing survives from the original leader thread,
886 * whose birth marks the true age of this process now.
887 * When we take on its identity by switching to its PID, we
888 * also take its birthdate (always earlier than our own).
890 tsk->start_time = leader->start_time;
892 write_lock_irq(&tasklist_lock);
894 BUG_ON(leader->tgid != tsk->tgid);
895 BUG_ON(tsk->pid == tsk->tgid);
897 * An exec() starts a new thread group with the
898 * TGID of the previous thread group. Rehash the
899 * two threads with a switched PID, and release
900 * the former thread group leader:
903 /* Become a process group leader with the old leader's pid.
904 * The old leader becomes a thread of this thread group.
905 * Note: The old leader also uses this pid until release_task
906 * is called. Odd but simple and correct.
908 detach_pid(tsk, PIDTYPE_PID);
909 tsk->pid = leader->pid;
910 attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid));
911 transfer_pid(leader, tsk, PIDTYPE_PGID);
912 transfer_pid(leader, tsk, PIDTYPE_SID);
913 list_replace_rcu(&leader->tasks, &tsk->tasks);
915 tsk->group_leader = tsk;
916 leader->group_leader = tsk;
918 tsk->exit_signal = SIGCHLD;
920 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
921 leader->exit_state = EXIT_DEAD;
923 write_unlock_irq(&tasklist_lock);
927 * There may be one thread left which is just exiting,
928 * but it's safe to stop telling the group to kill themselves.
935 release_task(leader);
937 BUG_ON(atomic_read(&sig->count) != 1);
939 if (atomic_read(&oldsighand->count) == 1) {
941 * Now that we nuked the rest of the thread group,
942 * it turns out we are not sharing sighand any more either.
943 * So we can just keep it.
945 kmem_cache_free(sighand_cachep, newsighand);
948 * Move our state over to newsighand and switch it in.
950 atomic_set(&newsighand->count, 1);
951 memcpy(newsighand->action, oldsighand->action,
952 sizeof(newsighand->action));
954 write_lock_irq(&tasklist_lock);
955 spin_lock(&oldsighand->siglock);
956 spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
958 rcu_assign_pointer(tsk->sighand, newsighand);
961 spin_unlock(&newsighand->siglock);
962 spin_unlock(&oldsighand->siglock);
963 write_unlock_irq(&tasklist_lock);
965 __cleanup_sighand(oldsighand);
968 BUG_ON(!thread_group_leader(tsk));
973 * This function flushes out all traces of the currently running executable
974 * so that a new one can be started.
977 static void flush_old_files(struct files_struct * files)
982 spin_lock(&files->file_lock);
984 unsigned long set, i;
988 fdt = files_fdtable(files);
989 if (i >= fdt->max_fds)
991 set = fdt->close_on_exec->fds_bits[j];
994 fdt->close_on_exec->fds_bits[j] = 0;
995 spin_unlock(&files->file_lock);
996 for ( ; set ; i++, set >>= 1) {
1001 spin_lock(&files->file_lock);
1004 spin_unlock(&files->file_lock);
1007 void get_task_comm(char *buf, struct task_struct *tsk)
1009 /* buf must be at least sizeof(tsk->comm) in size */
1011 strncpy(buf, tsk->comm, sizeof(tsk->comm));
1015 void set_task_comm(struct task_struct *tsk, char *buf)
1018 strlcpy(tsk->comm, buf, sizeof(tsk->comm));
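/*
 * Usage sketch for the helpers above; tsk->comm holds at most
 * sizeof(tsk->comm) - 1 characters plus the terminating NUL:
 *
 *	char comm[sizeof(current->comm)];
 *
 *	get_task_comm(comm, current);
 *	set_task_comm(current, "newname");
 */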
1022 int flush_old_exec(struct linux_binprm * bprm)
1026 struct files_struct *files;
1027 char tcomm[sizeof(current->comm)];
1030 * Make sure we have a private signal table and that
1031 * we are unassociated from the previous thread group.
1033 retval = de_thread(current);
1038 * Make sure we have private file handles. Ask the
1039 * fork helper to do the work for us and the exit
1040 * helper to do the cleanup of the old one.
1042 files = current->files; /* refcounted so safe to hold */
1043 retval = unshare_files();
1047 * Release all of the old mmap stuff
1049 retval = exec_mmap(bprm->mm);
1053 bprm->mm = NULL; /* We're using it now */
1055 /* This is the point of no return */
1056 put_files_struct(files);
1058 current->sas_ss_sp = current->sas_ss_size = 0;
1060 if (current->euid == current->uid && current->egid == current->gid)
1061 set_dumpable(current->mm, 1);
1063 set_dumpable(current->mm, suid_dumpable);
1065 name = bprm->filename;
1067 /* Copy the binary name from after the last slash */
1068 for (i=0; (ch = *(name++)) != '\0';) {
1070 i = 0; /* overwrite what we wrote */
1072 if (i < (sizeof(tcomm) - 1))
1076 set_task_comm(current, tcomm);
1078 current->flags &= ~PF_RANDOMIZE;
1081 /* Set the new mm task size. We have to do that late because it may
1082 * depend on TIF_32BIT, which is only updated in flush_thread() on
1083 * some architectures, like powerpc.
1085 current->mm->task_size = TASK_SIZE;
1087 if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
1088 file_permission(bprm->file, MAY_READ) ||
1089 (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
1091 set_dumpable(current->mm, suid_dumpable);
1094 /* An exec changes our domain. We are no longer part of the thread
1095 group. */
1097 current->self_exec_id++;
1099 flush_signal_handlers(current, 0);
1100 flush_old_files(current->files);
1105 reset_files_struct(current, files);
1110 EXPORT_SYMBOL(flush_old_exec);
1113 * Fill the binprm structure from the inode.
1114 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1116 int prepare_binprm(struct linux_binprm *bprm)
1119 struct inode * inode = bprm->file->f_path.dentry->d_inode;
1122 mode = inode->i_mode;
1123 if (bprm->file->f_op == NULL)
1126 bprm->e_uid = current->euid;
1127 bprm->e_gid = current->egid;
1129 if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
1131 if (mode & S_ISUID) {
1132 current->personality &= ~PER_CLEAR_ON_SETID;
1133 bprm->e_uid = inode->i_uid;
1138 * If setgid is set but no group execute bit then this
1139 * is a candidate for mandatory locking, not a setgid
1140 * executable.
1142 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1143 current->personality &= ~PER_CLEAR_ON_SETID;
1144 bprm->e_gid = inode->i_gid;
1148 /* fill in binprm security blob */
1149 retval = security_bprm_set(bprm);
1153 memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1154 return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1157 EXPORT_SYMBOL(prepare_binprm);
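/*
 * After prepare_binprm(), bprm->buf holds the first BINPRM_BUF_SIZE (128)
 * bytes of the image, and a handler sniffs them for its magic.  A sketch
 * in the style of binfmt_elf:
 *
 *	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
 *
 *	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
 *		return -ENOEXEC;	(not ours - let the next handler try)
 */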
1159 static int unsafe_exec(struct task_struct *p)
1162 if (p->ptrace & PT_PTRACED) {
1163 if (p->ptrace & PT_PTRACE_CAP)
1164 unsafe |= LSM_UNSAFE_PTRACE_CAP;
1166 unsafe |= LSM_UNSAFE_PTRACE;
1168 if (atomic_read(&p->fs->count) > 1 ||
1169 atomic_read(&p->files->count) > 1 ||
1170 atomic_read(&p->sighand->count) > 1)
1171 unsafe |= LSM_UNSAFE_SHARE;
1176 void compute_creds(struct linux_binprm *bprm)
1180 if (bprm->e_uid != current->uid)
1185 unsafe = unsafe_exec(current);
1186 security_bprm_apply_creds(bprm, unsafe);
1187 task_unlock(current);
1188 security_bprm_post_apply_creds(bprm);
1190 EXPORT_SYMBOL(compute_creds);
1193 * Arguments are '\0' separated strings found at the location bprm->p
1194 * points to; chop off the first by relocating bprm->p to right after
1195 * the first '\0' encountered.
1197 int remove_arg_zero(struct linux_binprm *bprm)
1200 unsigned long offset;
1208 offset = bprm->p & ~PAGE_MASK;
1209 page = get_arg_page(bprm, bprm->p, 0);
1214 kaddr = kmap_atomic(page, KM_USER0);
1216 for (; offset < PAGE_SIZE && kaddr[offset];
1217 offset++, bprm->p++)
1220 kunmap_atomic(kaddr, KM_USER0);
1223 if (offset == PAGE_SIZE)
1224 free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
1225 } while (offset == PAGE_SIZE);
1234 EXPORT_SYMBOL(remove_arg_zero);
1237 * cycle through the list of binary format handlers until one recognizes the image
1239 int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
1242 struct linux_binfmt *fmt;
1244 /* handle /sbin/loader.. */
1246 struct exec * eh = (struct exec *) bprm->buf;
1248 if (!bprm->loader && eh->fh.f_magic == 0x183 &&
1249 (eh->fh.f_flags & 0x3000) == 0x3000)
1252 unsigned long loader;
1254 allow_write_access(bprm->file);
1258 loader = bprm->vma->vm_end - sizeof(void *);
1260 file = open_exec("/sbin/loader");
1261 retval = PTR_ERR(file);
1265 /* Remember if the application is TASO. */
1266 bprm->sh_bang = eh->ah.entry < 0x100000000UL;
1269 bprm->loader = loader;
1270 retval = prepare_binprm(bprm);
1273 /* should call search_binary_handler recursively here,
1274 but it does not matter */
1278 retval = security_bprm_check(bprm);
1282 /* kernel module loader fixup */
1283 /* so we don't try to run modprobe in kernel space. */
1286 retval = audit_bprm(bprm);
1291 for (try = 0; try < 2; try++) {
1292 read_lock(&binfmt_lock);
1293 for (fmt = formats ; fmt ; fmt = fmt->next) {
1294 int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
1297 if (!try_module_get(fmt->module))
1299 read_unlock(&binfmt_lock);
1300 retval = fn(bprm, regs);
1303 allow_write_access(bprm->file);
1307 current->did_exec = 1;
1308 proc_exec_connector(current);
1311 read_lock(&binfmt_lock);
1313 if (retval != -ENOEXEC || bprm->mm == NULL)
1316 read_unlock(&binfmt_lock);
1320 read_unlock(&binfmt_lock);
1321 if (retval != -ENOEXEC || bprm->mm == NULL) {
1325 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1326 if (printable(bprm->buf[0]) &&
1327 printable(bprm->buf[1]) &&
1328 printable(bprm->buf[2]) &&
1329 printable(bprm->buf[3]))
1330 break; /* -ENOEXEC */
1331 request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
1338 EXPORT_SYMBOL(search_binary_handler);
1341 * sys_execve() executes a new program.
1343 int do_execve(char * filename,
1344 char __user *__user *argv,
1345 char __user *__user *envp,
1346 struct pt_regs * regs)
1348 struct linux_binprm *bprm;
1350 unsigned long env_p;
1354 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1358 file = open_exec(filename);
1359 retval = PTR_ERR(file);
1366 bprm->filename = filename;
1367 bprm->interp = filename;
1369 retval = bprm_mm_init(bprm);
1373 bprm->argc = count(argv, MAX_ARG_STRINGS);
1374 if ((retval = bprm->argc) < 0)
1377 bprm->envc = count(envp, MAX_ARG_STRINGS);
1378 if ((retval = bprm->envc) < 0)
1381 retval = security_bprm_alloc(bprm);
1385 retval = prepare_binprm(bprm);
1389 retval = copy_strings_kernel(1, &bprm->filename, bprm);
1393 bprm->exec = bprm->p;
1394 retval = copy_strings(bprm->envc, envp, bprm);
1399 retval = copy_strings(bprm->argc, argv, bprm);
1402 bprm->argv_len = env_p - bprm->p;
1404 retval = search_binary_handler(bprm, regs);
1406 /* execve success */
1407 free_arg_pages(bprm);
1408 security_bprm_free(bprm);
1409 acct_update_integrals(current);
1415 free_arg_pages(bprm);
1417 security_bprm_free(bprm);
1425 allow_write_access(bprm->file);
1435 int set_binfmt(struct linux_binfmt *new)
1437 struct linux_binfmt *old = current->binfmt;
1440 if (!try_module_get(new->module))
1443 current->binfmt = new;
1445 module_put(old->module);
1449 EXPORT_SYMBOL(set_binfmt);
1451 /* format_corename will inspect the pattern parameter, and output a
1452 * name into corename, which must have space for at least
1453 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
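/*
 * Illustrative expansions, assuming tgid 4242, signal 11, hostname "node1"
 * and comm "crashme":
 *
 *	core_pattern		resulting corename
 *	"core"			"core.4242" if core_uses_pid is set (or the
 *				mm is shared), else "core"
 *	"core.%p.%s"		"core.4242.11"
 *	"/cores/%h-%e"		"/cores/node1-crashme"
 *	"|/sbin/dumper"		ispipe: /sbin/dumper is run with the dump
 *				written to its stdin
 */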
1455 static int format_corename(char *corename, const char *pattern, long signr)
1457 const char *pat_ptr = pattern;
1458 char *out_ptr = corename;
1459 char *const out_end = corename + CORENAME_MAX_SIZE;
1461 int pid_in_pattern = 0;
1464 if (*pattern == '|')
1467 /* Repeat as long as we have more pattern to process and more output
1468 space */
1470 if (*pat_ptr != '%') {
1471 if (out_ptr == out_end)
1473 *out_ptr++ = *pat_ptr++;
1475 switch (*++pat_ptr) {
1478 /* Double percent, output one percent */
1480 if (out_ptr == out_end)
1487 rc = snprintf(out_ptr, out_end - out_ptr,
1488 "%d", current->tgid);
1489 if (rc > out_end - out_ptr)
1495 rc = snprintf(out_ptr, out_end - out_ptr,
1496 "%d", current->uid);
1497 if (rc > out_end - out_ptr)
1503 rc = snprintf(out_ptr, out_end - out_ptr,
1504 "%d", current->gid);
1505 if (rc > out_end - out_ptr)
1509 /* signal that caused the coredump */
1511 rc = snprintf(out_ptr, out_end - out_ptr,
1513 if (rc > out_end - out_ptr)
1517 /* UNIX time of coredump */
1520 do_gettimeofday(&tv);
1521 rc = snprintf(out_ptr, out_end - out_ptr,
1523 if (rc > out_end - out_ptr)
1530 down_read(&uts_sem);
1531 rc = snprintf(out_ptr, out_end - out_ptr,
1532 "%s", utsname()->nodename);
1534 if (rc > out_end - out_ptr)
1540 rc = snprintf(out_ptr, out_end - out_ptr,
1541 "%s", current->comm);
1542 if (rc > out_end - out_ptr)
1552 /* Backward compatibility with core_uses_pid:
1554 * If core_pattern does not include a %p (as is the default)
1555 * and core_uses_pid is set, then .%pid will be appended to
1556 * the filename. Do not do this for piped commands. */
1557 if (!ispipe && !pid_in_pattern
1558 && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
1559 rc = snprintf(out_ptr, out_end - out_ptr,
1560 ".%d", current->tgid);
1561 if (rc > out_end - out_ptr)
1570 static void zap_process(struct task_struct *start)
1572 struct task_struct *t;
1574 start->signal->flags = SIGNAL_GROUP_EXIT;
1575 start->signal->group_stop_count = 0;
1579 if (t != current && t->mm) {
1580 t->mm->core_waiters++;
1581 sigaddset(&t->pending.signal, SIGKILL);
1582 signal_wake_up(t, 1);
1584 } while ((t = next_thread(t)) != start);
1587 static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
1590 struct task_struct *g, *p;
1591 unsigned long flags;
1594 spin_lock_irq(&tsk->sighand->siglock);
1595 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
1596 tsk->signal->group_exit_code = exit_code;
1600 spin_unlock_irq(&tsk->sighand->siglock);
1604 if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
1608 for_each_process(g) {
1609 if (g == tsk->group_leader)
1617 * p->sighand can't disappear, but
1618 * may be changed by de_thread()
1620 lock_task_sighand(p, &flags);
1622 unlock_task_sighand(p, &flags);
1626 } while ((p = next_thread(p)) != g);
1630 return mm->core_waiters;
1633 static int coredump_wait(int exit_code)
1635 struct task_struct *tsk = current;
1636 struct mm_struct *mm = tsk->mm;
1637 struct completion startup_done;
1638 struct completion *vfork_done;
1641 init_completion(&mm->core_done);
1642 init_completion(&startup_done);
1643 mm->core_startup_done = &startup_done;
1645 core_waiters = zap_threads(tsk, mm, exit_code);
1646 up_write(&mm->mmap_sem);
1648 if (unlikely(core_waiters < 0))
1652 * Make sure nobody is waiting for us to release the VM; otherwise
1653 * we can deadlock when we wait on each other.
1655 vfork_done = tsk->vfork_done;
1657 tsk->vfork_done = NULL;
1658 complete(vfork_done);
1662 wait_for_completion(&startup_done);
1664 BUG_ON(mm->core_waiters);
1665 return core_waiters;
1669 * set_dumpable converts the traditional three-value dumpable setting to
1670 * two flags and stores them in mm->flags. It modifies the lower two bits
1671 * of mm->flags, but these bits are not changed atomically, so
1672 * get_dumpable can observe an intermediate state. To avoid unexpected
1673 * behavior, get_dumpable returns either the old dumpable value or the new
1674 * one; this is guaranteed by the order in which the bits are modified.
1676 * dumpable |   mm->flags (binary)
1677 * old  new | initial interim  final
1678 * ---------+-----------------------
1679 *  0    1  |   00      01      01
1680 *  0    2  |   00      10(*)   11
1681 *  1    0  |   01      00      00
1682 *  1    2  |   01      11      11
1683 *  2    0  |   11      10(*)   00
1684 *  2    1  |   11      11      01
1685 *  2    2  |   11      11      11
1686 * (*) get_dumpable regards an interim value of 10 as 11.
1688 void set_dumpable(struct mm_struct *mm, int value)
1692 clear_bit(MMF_DUMPABLE, &mm->flags);
1694 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1697 set_bit(MMF_DUMPABLE, &mm->flags);
1699 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1702 set_bit(MMF_DUMP_SECURELY, &mm->flags);
1704 set_bit(MMF_DUMPABLE, &mm->flags);
1708 EXPORT_SYMBOL_GPL(set_dumpable);
1710 int get_dumpable(struct mm_struct *mm)
1714 ret = mm->flags & 0x3;
1715 return (ret >= 2) ? 2 : ret;
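/*
 * get_dumpable() maps the two bits back onto the traditional values:
 * 00 -> 0, 01 -> 1, and both 10 and 11 -> 2, which is why the interim
 * value 10 in the table above is read as a secure dump (11) rather than
 * as non-dumpable.
 */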
1718 int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1720 char corename[CORENAME_MAX_SIZE + 1];
1721 struct mm_struct *mm = current->mm;
1722 struct linux_binfmt * binfmt;
1723 struct inode * inode;
1726 int fsuid = current->fsuid;
1730 audit_core_dumps(signr);
1732 binfmt = current->binfmt;
1733 if (!binfmt || !binfmt->core_dump)
1735 down_write(&mm->mmap_sem);
1736 if (!get_dumpable(mm)) {
1737 up_write(&mm->mmap_sem);
1742 * We cannot trust fsuid as being the "true" uid of the
1743 * process nor do we know its entire history. We only know it
1744 * was tainted so we dump it as root in mode 2.
1746 if (get_dumpable(mm) == 2) { /* Setuid core dump mode */
1747 flag = O_EXCL; /* Stop rewrite attacks */
1748 current->fsuid = 0; /* Dump root private */
1750 set_dumpable(mm, 0);
1752 retval = coredump_wait(exit_code);
1757 * Clear any false indication of pending signals that might
1758 * be seen by the filesystem code called to write the core file.
1760 clear_thread_flag(TIF_SIGPENDING);
1762 if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
1766 * lock_kernel() because format_corename() is controlled by sysctl, which
1767 * uses lock_kernel()
1770 ispipe = format_corename(corename, core_pattern, signr);
1773 /* SIGPIPE can happen, but it's just never processed */
1774 if (call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
1775 printk(KERN_INFO "Core dump to %s pipe failed\n",
1780 file = filp_open(corename,
1781 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
1785 inode = file->f_path.dentry->d_inode;
1786 if (inode->i_nlink > 1)
1787 goto close_fail; /* multiple links - don't dump */
1788 if (!ispipe && d_unhashed(file->f_path.dentry))
1791 /* AK: actually I see no reason to not allow this for named pipes etc.,
1792 but keep the previous behaviour for now. */
1793 if (!ispipe && !S_ISREG(inode->i_mode))
1797 if (!file->f_op->write)
1799 if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
1802 retval = binfmt->core_dump(signr, regs, file);
1805 current->signal->group_exit_code |= 0x80;
1807 filp_close(file, NULL);
1809 current->fsuid = fsuid;
1810 complete_all(&mm->core_done);