Merge commit 'v2.6.29-rc1' into x86/urgent
[linux-2.6] / arch / um / kernel / mem.c
1 /*
2  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3  * Licensed under the GPL
4  */
5
6 #include <linux/stddef.h>
7 #include <linux/bootmem.h>
8 #include <linux/gfp.h>
9 #include <linux/highmem.h>
10 #include <linux/mm.h>
11 #include <linux/swap.h>
12 #include <asm/fixmap.h>
13 #include <asm/page.h>
14 #include "as-layout.h"
15 #include "init.h"
16 #include "kern.h"
17 #include "kern_util.h"
18 #include "mem_user.h"
19 #include "os.h"
20
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
/* allocated in paging_init and unchanged thereafter */
static unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
/* set to 1 at the end of mem_init(), once the page allocator is usable */
int kmalloc_ok = 0;

/* Used during early boot - end of the brk region, set in mem_init() */
static unsigned long brk_end;
#ifdef CONFIG_HIGHMEM
/*
 * Hand the highmem region over to the page allocator: clear the
 * bootmem Reserved bit on each page, reset its refcount, and free it.
 *
 * highmem_start - kernel virtual address of the start of the region
 * highmem_len   - length of the region in bytes
 */
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        unsigned long highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        /*
         * Hoist the loop-invariant page count and keep the index
         * unsigned long - the old "int i" was compared against an
         * unsigned long and could overflow for very large regions.
         */
        unsigned long nr_pages = highmem_len >> PAGE_SHIFT;
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = &mem_map[highmem_pfn + i];

                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
        }
}
#endif
56
/*
 * Late boot memory setup: zero the shared zero-page, map in and
 * release the area between the current brk and the end of the
 * reserved region, then hand all remaining low memory from bootmem to
 * the page allocator and update the global page accounting.  kmalloc
 * is usable (kmalloc_ok) once this returns.
 */
void __init mem_init(void)
{
        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
        /* the newly mapped area is no longer reserved from the allocator */
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
#endif
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
        /* highmem pages are freed separately, see setup_highmem() */
        setup_highmem(end_iomem, highmem);
#endif
}
87
88 /*
89  * Create a page table and place a pointer to it in a middle page
90  * directory entry.
91  */
92 static void __init one_page_table_init(pmd_t *pmd)
93 {
94         if (pmd_none(*pmd)) {
95                 pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
96                 set_pmd(pmd, __pmd(_KERNPG_TABLE +
97                                            (unsigned long) __pa(pte)));
98                 if (pte != pte_offset_kernel(pmd, 0))
99                         BUG();
100         }
101 }
102
103 static void __init one_md_table_init(pud_t *pud)
104 {
105 #ifdef CONFIG_3_LEVEL_PGTABLES
106         pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
107         set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
108         if (pmd_table != pmd_offset(pud, 0))
109                 BUG();
110 #endif
111 }
112
/*
 * Make sure page-table pages exist for the kernel virtual range
 * [start, end): walk the pgd/pud/pmd entries covering the range and
 * allocate intermediate tables wherever they are missing.  Only the
 * table structure is built here; no ptes are populated.
 */
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = pud_offset(pgd, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                /* only the first pgd starts mid-table; restart at pmd 0 */
                j = 0;
        }
}
139
#ifdef CONFIG_HIGHMEM
/* pte of the first kmap fixmap slot, cached by kmap_init() */
pte_t *kmap_pte;
/* protection used for kmap mappings; set to PAGE_KERNEL in kmap_init() */
pgprot_t kmap_prot;

/* walk the kernel page tables down to the pte mapping vaddr */
#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
                                     (vaddr)), (vaddr))
147
148 static void __init kmap_init(void)
149 {
150         unsigned long kmap_vstart;
151
152         /* cache the first kmap pte */
153         kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
154         kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
155
156         kmap_prot = PAGE_KERNEL;
157 }
158
/*
 * Set up the permanent kmap (pkmap) area: build the page-table
 * structure covering PKMAP_BASE, cache the pte table backing it in
 * pkmap_page_table, and initialize the kmap state.
 */
static void __init init_highmem(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        /* walk down to the pte table that fixrange_init just created */
        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}
#endif /* CONFIG_HIGHMEM */
182
/*
 * Copy the host area at FIXADDR_USER_START into freshly allocated
 * pages and map that copy read-only at the same fixed user addresses.
 * NOTE(review): the config name suggests this is the host's vsyscall
 * area being reused - confirm against the arch definition of
 * CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA.
 */
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_t p;
        unsigned long v, vaddr = FIXADDR_USER_START;

        if (!size)
                return;

        /* make sure page tables exist for the whole range */
        fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        /* snapshot the current contents of the host area */
        v = (unsigned long) alloc_bootmem_low_pages(size);
        memcpy((void *) v , (void *) FIXADDR_USER_START, size);
        p = __pa(v);
        /* point each pte in the range at the corresponding copied page */
        for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
                      p += PAGE_SIZE) {
                pgd = swapper_pg_dir + pgd_index(vaddr);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                pte = pte_offset_kernel(pmd, vaddr);
                pte_set_val(*pte, p, PAGE_READONLY);
        }
#endif
}
211
/*
 * Build the initial page tables: allocate the zero/bad pages, size the
 * memory zones and hand them to the core VM, then create page-table
 * structure for the fixmap range (the actual fixmap mappings are
 * installed later via set_fixmap()).
 */
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for (i = 0; i < ARRAY_SIZE(zones_size); i++)
                zones_size[i] = 0;

        /* everything from uml_physmem up to end_iomem is ZONE_NORMAL */
        zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
                (uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}
242
/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 * (Intentionally a no-op; kept so the generic init code has something
 * to call.)
 */

void free_initmem(void)
{
}
251
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages holding the initrd image to the page allocator once
 * the image is no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                /* %lu: (end - start) is unsigned long, %ld was wrong */
                printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                /* hoist the repeated virt_to_page() translation */
                struct page *page = virt_to_page(start);

                ClearPageReserved(page);
                init_page_count(page);
                free_page(start);
                totalram_pages++;
        }
}
#endif
266
267 /* Allocate and free page tables. */
268
269 pgd_t *pgd_alloc(struct mm_struct *mm)
270 {
271         pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
272
273         if (pgd) {
274                 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
275                 memcpy(pgd + USER_PTRS_PER_PGD,
276                        swapper_pg_dir + USER_PTRS_PER_PGD,
277                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
278         }
279         return pgd;
280 }
281
/* Release a pgd page previously obtained from pgd_alloc() */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}
286
287 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
288 {
289         pte_t *pte;
290
291         pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
292         return pte;
293 }
294
295 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
296 {
297         struct page *pte;
298
299         pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
300         if (pte)
301                 pgtable_page_ctor(pte);
302         return pte;
303 }
304
#ifdef CONFIG_3_LEVEL_PGTABLES
/*
 * Allocate a zeroed pmd page for three-level page tables; returns
 * NULL on allocation failure.  The address argument is unused here.
 */
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        /*
         * __GFP_ZERO replaces the explicit memset, matching the other
         * page-table allocators in this file (pte_alloc_one*).
         */
        return (pmd_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
}
#endif
316
/*
 * Thin kmalloc wrapper with a plain-int interface.  NOTE(review):
 * presumably exists so the userspace (os) side, which can't include
 * kernel headers, can allocate kernel memory - confirm against callers.
 */
void *uml_kmalloc(int size, int flags)
{
        return kmalloc(size, flags);
}