/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
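/*
 * vextern.h lists the variables exported by the vdso as VEXTERN(name)
 * entries (for example VEXTERN(jiffies)).  VEXTERN() is redefined before
 * each inclusion, so the same list generates the vdso_* pointer
 * declarations here and the patch-up assignments in init_vdso_vars()
 * below.
 */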
#define VEXTERN(x) extern typeof(__ ## x) *vdso_ ## x;
#include "vextern.h"
#undef VEXTERN

extern char vdso_kernel_start[], vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

int vdso_enabled = 1;
struct page **vdso_pages;
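/*
 * Patchable variables in the vdso image are expected to still hold the
 * VMAGIC placeholder.  var_ref() computes where such a variable lives
 * inside the copied image (vbase plus its offset from vdso_kernel_start)
 * and disables the vdso if the placeholder is missing, i.e. if the image
 * and the kernel disagree about the layout.
 */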
static inline void *var_ref(void *vbase, char *var, char *name)
{
	unsigned offset = var - &vdso_kernel_start[0] + VDSO_TEXT_OFFSET;
	void *p = vbase + offset;
	if (*(void **)p != (void *)VMAGIC) {
		printk("VDSO: variable %s broken\n", name);
		vdso_enabled = 0;
	}
	return p;
}
static int __init init_vdso_vars(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;
	char *vbase;

	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!vdso_pages)
		goto oom;
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			goto oom;
		vdso_pages[i] = p;
		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
	}

	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
	if (!vbase)
		goto oom;

	if (memcmp(vbase, "\177ELF", 4)) {
		printk("VDSO: I'm broken; not ELF\n");
		vdso_enabled = 0;
	}

#define V(x) *(typeof(x) *) var_ref(vbase, (char *)RELOC_HIDE(&x, 0), #x)
#define VEXTERN(x) \
	V(vdso_ ## x) = &__ ## x;
#include "vextern.h"
#undef VEXTERN
	return 0;

oom:
	printk("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
__initcall(init_vdso_vars);
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it still lands in the same page-table page
 * as the stack top.  This doesn't give that many random bits.
 */
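/*
 * With 4K pages and PTRS_PER_PTE == 512 the extra offset below is at
 * most 2MB above the stack top, i.e. at most 9 bits of randomness.
 */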
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE64)
		end = TASK_SIZE64;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}
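/*
 * The chosen base is remembered in mm->context.vdso; the ELF loader
 * reports it to userspace through the AT_SYSINFO_EHDR auxv entry so the
 * dynamic linker can find the vdso.
 */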
struct linux_binprm;

/*
 * Set up a VMA at program startup for the vsyscall page.
 * Not called for compat tasks.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;
	unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE);

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, len);
	addr = get_unmapped_area(NULL, addr, len, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(mm, addr, len,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      vdso_pages);
	if (ret)
		goto up_fail;

	current->mm->context.vdso = (void *)addr;
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
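/*
 * "vdso=" kernel command line option: vdso=0 disables the vdso mapping
 * for new processes, the default vdso=1 enables it.
 */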
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);