[PATCH] uml: skas0 - separate kernel address space on stock hosts
linux-2.6: arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

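/*
 * Round n up to the next inc boundary, i.e. to the end of the inc-sized,
 * inc-aligned region containing n.  inc must be a power of two.
 */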
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

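/*
 * Walk the page tables covering [start_addr, end_addr) and queue host
 * mmap/munmap/mprotect operations for every entry that is marked as
 * needing an update (or for everything, if "force" is set).  Operations
 * are batched in "ops" and flushed through do_ops() whenever the array
 * fills up.
 */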
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      void (*do_ops)(union mm_context *, struct host_vm_op *,
                                     int))
{
        pgd_t *npgd;
        pud_t *npud;
        pmd_t *npmd;
        pte_t *npte;
        union mm_context *mmu = &mm->context;
        unsigned long addr, end;
        int r, w, x;
        struct host_vm_op ops[16];
        int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;

        if(mm == NULL) return;

        for(addr = start_addr; addr < end_addr;){
                npgd = pgd_offset(mm, addr);
                if(!pgd_present(*npgd)){
                        end = ADD_ROUND(addr, PGDIR_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pgd_newpage(*npgd)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, mmu,
                                                      do_ops);
                                pgd_mkuptodate(*npgd);
                        }
                        addr = end;
                        continue;
                }

                npud = pud_offset(npgd, addr);
                if(!pud_present(*npud)){
                        end = ADD_ROUND(addr, PUD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pud_newpage(*npud)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, mmu,
                                                      do_ops);
                                pud_mkuptodate(*npud);
                        }
                        addr = end;
                        continue;
                }

                npmd = pmd_offset(npud, addr);
                if(!pmd_present(*npmd)){
                        end = ADD_ROUND(addr, PMD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pmd_newpage(*npmd)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, mmu,
                                                      do_ops);
                                pmd_mkuptodate(*npmd);
                        }
                        addr = end;
                        continue;
                }

                npte = pte_offset_kernel(npmd, addr);
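                /*
                 * Take write permission away if the page is clean and any
                 * access away if it is not young, so that the first write
                 * or access faults back into UML and the dirty and
                 * accessed bits can be emulated.
                 */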
                r = pte_read(*npte);
                w = pte_write(*npte);
                x = pte_exec(*npte);
                if(!pte_dirty(*npte))
                        w = 0;
                if(!pte_young(*npte)){
                        r = 0;
                        w = 0;
                }
                if(force || pte_newpage(*npte)){
                        if(pte_present(*npte))
                                op_index = add_mmap(addr,
                                                    pte_val(*npte) & PAGE_MASK,
                                                    PAGE_SIZE, r, w, x, ops,
                                                    op_index, last_op, mmu,
                                                    do_ops);
                        else op_index = add_munmap(addr, PAGE_SIZE, ops,
                                                   op_index, last_op, mmu,
                                                   do_ops);
                }
                else if(pte_newprot(*npte))
                        op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
                                                op_index, last_op, mmu,
                                                do_ops);

                *npte = pte_mkuptodate(*npte);
                addr += PAGE_SIZE;
        }
        (*do_ops)(mmu, ops, op_index);
}

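/*
 * Bring the host's view of the kernel mappings in init_mm for
 * [start, end) up to date: unmap any region whose page table entry is
 * marked as new, remap the pages that are actually present, and
 * reprotect pages whose permissions changed.  Returns nonzero if
 * anything on the host was changed.
 */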
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

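                /*
                 * A new page is unmapped on the host and, if it is in
                 * fact present, remapped; kernel pages are always mapped
                 * read-write-execute.
                 */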
                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}

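/*
 * Out-of-line wrappers around the page table accessors, for callers that
 * cannot use the inline versions directly (such as the userspace side of
 * the skas code).
 */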
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}

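/*
 * Queue an mmap of len bytes at virt, backed by the physical memory at
 * phys.  If the new mapping directly extends the previously queued mmap
 * (same fd and permissions, contiguous addresses and offsets), the two
 * are merged into one op; if the ops array is full, it is flushed
 * through do_ops() first.
 */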
int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
             int r, int w, int x, struct host_vm_op *ops, int index,
             int last_filled, union mm_context *mmu,
             void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd;

        fd = phys_mapping(phys, &offset);
        if(index != -1){
                last = &ops[index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(mmu, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type     = MMAP,
                                              .u = { .mmap = {
                                                      .addr     = virt,
                                                      .len      = len,
                                                      .r        = r,
                                                      .w        = w,
                                                      .x        = x,
                                                      .fd       = fd,
                                                      .offset   = offset }
                                              } });
        return(index);
}

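/*
 * Queue a munmap, merging it with the previously queued op when that is
 * an adjoining munmap, and flushing the ops array first if it is full.
 */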
int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
               int index, int last_filled, union mm_context *mmu,
               void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
        struct host_vm_op *last;

        if(index != -1){
                last = &ops[index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(mmu, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type     = MUNMAP,
                                              .u = { .munmap = {
                                                      .addr     = addr,
                                                      .len      = len } } });
        return(index);
}

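/*
 * Queue an mprotect, merging it with the previously queued op when that
 * is an adjoining mprotect with identical permissions.
 */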
int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
                 struct host_vm_op *ops, int index, int last_filled,
                 union mm_context *mmu,
                 void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
        struct host_vm_op *last;

        if(index != -1){
                last = &ops[index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                   (last->u.mprotect.x == x)){
                        last->u.mprotect.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(mmu, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type     = MPROTECT,
                                              .u = { .mprotect = {
                                                      .addr     = addr,
                                                      .len      = len,
                                                      .r        = r,
                                                      .w        = w,
                                                      .x        = x } } });
        return(index);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        address &= PAGE_MASK;
        flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

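/*
 * The entry points below dispatch to the tt- or skas-mode implementation,
 * depending on which mode this UML instance is running in.
 */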
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}