[IA64] ia32 nopage
[linux-2.6] arch/ia64/ia32/binfmt_elf32.c
/*
 * IA-32 ELF support.
 *
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 2001 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 06/16/00     A. Mallick      initialize csd/ssd/tssd/cflg for ia32_load_state
 * 04/13/01     D. Mosberger    dropped saving tssd in ar.k1---it's not needed
 * 09/14/01     D. Mosberger    fixed memory management for gdt/tss page
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/security.h>

#include <asm/param.h>
#include <asm/signal.h>

#include "ia32priv.h"
#include "elfcore32.h"

/* Override some function names */
#undef start_thread
#define start_thread                    ia32_start_thread
#define elf_format                      elf32_format
#define init_elf_binfmt                 init_elf32_binfmt
#define exit_elf_binfmt                 exit_elf32_binfmt

#undef CLOCKS_PER_SEC
#define CLOCKS_PER_SEC  IA32_CLOCKS_PER_SEC

extern void ia64_elf32_init (struct pt_regs *regs);

static void elf32_set_personality (void);

static unsigned long __attribute__ ((unused))
randomize_stack_top(unsigned long stack_top);

#define setup_arg_pages(bprm,tos,exec)          ia32_setup_arg_pages(bprm,exec)
#define elf_map                         elf32_map

#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2)      elf32_set_personality()

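/*
 * A binary without a PT_GNU_STACK program header predates the
 * read/exec permission split, so for such binaries read permission is
 * taken to imply exec permission.
 */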
#define elf_read_implies_exec(ex, have_pt_gnu_stack)    (!(have_pt_gnu_stack))

/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"
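
/*
 * Note on the include above: the #defines earlier in this file rename
 * the generic loader's entry points (start_thread, elf_format, elf_map,
 * ...), so textually including fs/binfmt_elf.c compiles a second,
 * IA-32-flavoured copy of the ELF loader without duplicating its
 * source.
 */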

extern struct page *ia32_shared_page[];
extern unsigned long *ia32_gdt;
extern struct page *ia32_gate_page;

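/*
 * 2.6.23-style .fault handlers for the mappings set up below: on first
 * touch, each handler stores the backing page in vmf->page, takes an
 * extra reference (the generic fault path consumes it), and returns 0
 * for success.  For the shared page, the page installed is the GDT
 * page of the CPU on which the fault is handled.
 */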
int
ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf)
{
        vmf->page = ia32_shared_page[smp_processor_id()];
        get_page(vmf->page);
        return 0;
}

int
ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
{
        vmf->page = ia32_gate_page;
        get_page(vmf->page);
        return 0;
}

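/*
 * Each vm_operations_struct below carries only .fault, the 2.6.23
 * replacement for the old ->nopage method (hence the "ia32 nopage"
 * title of this change).
 */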
static struct vm_operations_struct ia32_shared_page_vm_ops = {
        .fault = ia32_install_shared_page
};

static struct vm_operations_struct ia32_gate_page_vm_ops = {
        .fault = ia32_install_gate_page
};

void
ia64_elf32_init (struct pt_regs *regs)
{
        struct vm_area_struct *vma;

        /*
         * Map GDT below 4GB, where the processor can find it.  We need to
         * map it with privilege level 3 because the IVE uses non-privileged
         * accesses to these tables.  IA-32 segmentation is used to protect
         * against IA-32 accesses to them.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_GDT_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = PAGE_SHARED;
                vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
                vma->vm_ops = &ia32_shared_page_vm_ops;
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        kmem_cache_free(vm_area_cachep, vma);
                        up_write(&current->mm->mmap_sem);
                        BUG();
                }
                up_write(&current->mm->mmap_sem);
        }

        /*
         * When the user stack is not executable, the sigreturn trampoline
         * can no longer be pushed onto the stack: executing it from there
         * would raise a segmentation fault.  The sigreturn code therefore
         * lives in a dedicated gate page, which setup_frame_ia32() points
         * pretcode at.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_GATE_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = PAGE_COPY_EXEC;
                vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
                                | VM_MAYEXEC | VM_RESERVED;
                vma->vm_ops = &ia32_gate_page_vm_ops;
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        kmem_cache_free(vm_area_cachep, vma);
                        up_write(&current->mm->mmap_sem);
                        BUG();
                }
                up_write(&current->mm->mmap_sem);
        }

        /*
         * Install the LDT as anonymous memory (no vm_ops, so faults are
         * satisfied with zero-filled pages).  This gives us all-zero
         * segment descriptors until a task modifies them via modify_ldt().
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_LDT_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
                vma->vm_page_prot = PAGE_SHARED;
                vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        kmem_cache_free(vm_area_cachep, vma);
                        up_write(&current->mm->mmap_sem);
                        BUG();
                }
                up_write(&current->mm->mmap_sem);
        }

        ia64_psr(regs)->ac = 0;         /* turn off alignment checking */
        regs->loadrs = 0;
        /*
         * According to the ABI, %edx points to an `atexit' handler.  Since
         * we don't have one, we'll set it to 0 and initialize all the other
         * registers too, just to make things more deterministic, à la the
         * i386 implementation.
         */
        regs->r8 = 0;   /* %eax */
        regs->r11 = 0;  /* %ebx */
        regs->r9 = 0;   /* %ecx */
        regs->r10 = 0;  /* %edx */
        regs->r13 = 0;  /* %ebp */
        regs->r14 = 0;  /* %esi */
        regs->r15 = 0;  /* %edi */

        current->thread.eflag = IA32_EFLAG;
        current->thread.fsr = IA32_FSR_DEFAULT;
        current->thread.fcr = IA32_FCR_DEFAULT;
        current->thread.fir = 0;
        current->thread.fdr = 0;

        /*
         * Setup GDTD.  Note: GDTD is the descrambled version of the
         * pseudo-descriptor format defined by Figure 3-11 "Pseudo-Descriptor
         * Format" in the IA-32 architecture manual.  Also note that the only
         * fields that are not ignored are `base', `limit', `G', `P' (must be
         * 1) and `S' (must be 0).
         */
        regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
                                                            0, 0, 0, 1, 0, 0, 0));
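        /*
         * For reference, a plausible reading of the line above (the exact
         * argument order is defined by IA32_SEG_DESCRIPTOR() in ia32priv.h):
         * base = IA32_GDT_OFFSET, limit = IA32_PAGE_SIZE - 1, P = 1, and
         * every remaining field 0.  That is consistent with the note above
         * that only base, limit, G, P and S matter here.
         */
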
        /* Setup the segment selectors */
        regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
        regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */

        ia32_load_segment_descriptors(current);
        ia32_load_state(current);
}

/*
 * Undo the override of setup_arg_pages(); without this,
 * ia32_setup_arg_pages() would suffer infinite self-recursion.
 */
#undef setup_arg_pages

int
ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
{
        int ret;

        ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack);
        if (!ret) {
                /*
                 * This can't be done in ia64_elf32_init() because it needs
                 * to happen before any call to elf32_map().
                 */
                current->thread.ppl = ia32_init_pp_list();
        }

        return ret;
}

static void
elf32_set_personality (void)
{
        set_personality(PER_LINUX32);
        current->thread.map_base = IA32_PAGE_OFFSET/3;
}
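
/*
 * map_base is the default base for mmap() allocations.  Placing it at
 * IA32_PAGE_OFFSET/3 mirrors the i386 convention of starting the mmap
 * area at TASK_SIZE/3 (TASK_UNMAPPED_BASE).
 */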

static unsigned long
elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
{
        unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;

        return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
                            eppnt->p_offset - pgoff);
}
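
/*
 * Worked example (assuming 4KB IA-32 pages, i.e. IA32_PAGE_MASK ==
 * ~0xfffUL): for p_vaddr == 0x08048100, pgoff == 0x100, so the segment
 * is mapped starting at 0x08048000 with the file offset pulled back by
 * 0x100 and the length grown by 0x100.  This page-aligns the mapping
 * while preserving the congruence between p_vaddr and p_offset that
 * mmap requires.
 */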

#define cpu_uses_ia32el()       (local_cpu_data->family > 0x1f)
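
/*
 * Family 0x1f is Itanium 2; later families (Montecito onward) dropped
 * the hardware IA-32 engine, so IA-32 binaries must run under the
 * IA-32 Execution Layer (IA-32 EL) software translator instead.  The
 * family numbers here are stated as an assumption based on published
 * cpuid values.
 */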

static int __init check_elf32_binfmt(void)
{
        if (cpu_uses_ia32el()) {
                printk(KERN_INFO "Please use IA-32 EL for executing IA-32 binaries\n");
                unregister_binfmt(&elf_format);
        }
        return 0;
}

module_init(check_elf32_binfmt)