uml: style fixes pass 3
arch/um/kernel/skas/mmu.c
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/sched.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "os.h"
#include "skas.h"

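/*
 * Defined by the UML linker script at the start of the syscall stub
 * section; declared as an int only so its address can be taken.
 */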
extern int __syscall_stub_start;

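/*
 * Map the kernel page at "kernel" into the process address space at
 * "proc" as a present, readable stub page, allocating any intermediate
 * page table levels along the way.
 */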
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
                         unsigned long kernel)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
                goto out;

        pmd = pmd_alloc(mm, pud, proc);
        if (!pmd)
                goto out_pmd;

        pte = pte_alloc_map(mm, pmd, proc);
        if (!pte)
                goto out_pte;

        /*
         * There's an interaction between the skas0 stub pages, stack
         * randomization, and the BUG at the end of exit_mmap.  exit_mmap
         * checks that the number of page tables freed is the same as had
         * been allocated.  If the stack is on the last page table page,
         * then the stack pte page will be freed, and if not, it won't.  To
         * avoid having to know where the stack is, or if the process mapped
         * something at the top of its address space for some other reason,
         * we set TASK_SIZE to end at the start of the last page table.
         * This keeps exit_mmap off the last page, but introduces a leak
         * of that page.  So, we hang onto it here and free it in
         * destroy_context.
         */

        mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
        mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkread(*pte);
        return 0;

 out_pte:
        pmd_free(pmd);
 out_pmd:
        pud_free(pud);
 out:
        return -ENOMEM;
}
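/*
 * Build the skas context for a new mm: allocate the stub stack page
 * and map the stub code and data pages (when a stub is needed), create
 * the host address space (a /proc/mm descriptor or a ptraced process),
 * and set up the LDT, copying from the parent context if there is one.
 */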
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
        struct mmu_context_skas *from_mm = NULL;
        struct mmu_context_skas *to_mm = &mm->context.skas;
        unsigned long stack = 0;
        int ret = -ENOMEM;

        if (skas_needs_stub) {
                stack = get_zeroed_page(GFP_KERNEL);
                if (stack == 0)
                        goto out;

                /*
                 * This zeros the entry that pgd_alloc didn't, needed
                 * since we are about to reinitialize it, and want
                 * mm->nr_ptes to be accurate.
                 */
                mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

                ret = init_stub_pte(mm, CONFIG_STUB_CODE,
                                    (unsigned long) &__syscall_stub_start);
                if (ret)
                        goto out_free;

                ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
                if (ret)
                        goto out_free;

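                /*
                 * The stub ptes live above TASK_SIZE, in the pte page
                 * deliberately leaked by init_stub_pte, which exit_mmap
                 * never frees; uncount that page here so the page table
                 * accounting balances.  Both stub pages sit in that one
                 * pte page, so a single decrement covers them.
                 * destroy_context frees the page instead.
                 */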
                mm->nr_ptes--;
        }

        to_mm->id.stack = stack;
        if (current->mm != NULL && current->mm != &init_mm)
                from_mm = &current->mm->context.skas;

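        /*
         * With /proc/mm on the host, a new address space is just a file
         * descriptor; otherwise it is a ptraced host process, either
         * forked from the current one via the skas0 stub or started
         * fresh by start_userspace.
         */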
        if (proc_mm) {
                ret = new_mm(stack);
                if (ret < 0) {
                        printk(KERN_ERR "init_new_context - "
                               "new_mm failed, errno = %d\n", ret);
                        goto out_free;
                }
                to_mm->id.u.mm_fd = ret;
        } else {
                if (from_mm)
                        to_mm->id.u.pid = copy_context_skas0(stack,
                                                             from_mm->id.u.pid);
                else
                        to_mm->id.u.pid = start_userspace(stack);
        }

        ret = init_new_ldt(to_mm, from_mm);
        if (ret < 0) {
                printk(KERN_ERR "init_new_context - "
                       "init_new_ldt failed, errno = %d\n", ret);
                goto out_free;
        }

        return 0;

 out_free:
        if (to_mm->id.stack != 0)
                free_page(to_mm->id.stack);
 out:
        return ret;
}
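/*
 * Tear down what init_new_context set up: close the /proc/mm descriptor
 * or kill the host process, then free the stub stack page and the
 * deliberately leaked stub page tables.
 */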
void destroy_context(struct mm_struct *mm)
{
        struct mmu_context_skas *mmu = &mm->context.skas;

        if (proc_mm)
                os_close_file(mmu->id.u.mm_fd);
        else
                os_kill_ptraced_process(mmu->id.u.pid, 1);

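        /*
         * This appears to mirror the skas_needs_stub test in
         * init_new_context: the stack page and the leaked stub page
         * tables exist only when a stub was set up.
         */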
        if (!proc_mm || !ptrace_faultinfo) {
                free_page(mmu->id.stack);
                pte_lock_deinit(virt_to_page(mmu->last_page_table));
                pte_free_kernel((pte_t *) mmu->last_page_table);
                dec_zone_page_state(virt_to_page(mmu->last_page_table),
                                    NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
                pmd_free((pmd_t *) mmu->last_pmd);
#endif
        }
}