/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include "as-layout.h"
#include "init.h"
#include "kern.h"
#include "kern_util.h"
#include "mem_user.h"
#include "os.h"
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
/* allocated in paging_init and unchanged thereafter */
unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;
#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
			  unsigned long highmem_len)
{
	struct page *page;
	unsigned long highmem_pfn;
	int i;

	highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
	for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
		page = &mem_map[highmem_pfn + i];
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}
}
#endif
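/*
 * mem_init() hands the remaining boot-allocator memory to the page
 * allocator and finishes the boot-time RAM accounting.
 */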
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	free_bootmem(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	totalram_pages = free_all_bootmem();
	max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
	totalhigh_pages = highmem >> PAGE_SHIFT;
	totalram_pages += totalhigh_pages;
#endif
	num_physpages = totalram_pages;
	max_pfn = totalram_pages;
	printk(KERN_INFO "Memory: %luk available\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
	kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
	setup_highmem(end_iomem, highmem);
#endif
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}
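/*
 * Walk the range in PMD-sized steps and allocate any missing intermediate
 * page-table levels; the ptes themselves are filled in later, e.g. by
 * set_fixmap().
 */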
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
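/*
 * Highmem support: cache the kernel pte covering the kmap fixmap slots and
 * set up the page table used for permanent kmaps (pkmap).
 */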
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
				     (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init init_highmem(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;

	kmap_init();
}
#endif /* CONFIG_HIGHMEM */
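/*
 * Copy the host's vsyscall fixmap area into boot memory and map the copy
 * read-only at the same user-visible addresses.
 */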
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) alloc_bootmem_low_pages(size);
	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
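/*
 * Allocate the zero/bad pages, size the memory zones, and create the page
 * tables that back the fixmap range.
 */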
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
	init_highmem();
#endif
}
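/*
 * Verify that the host really backs every page of a new allocation by
 * writing a zero to each page; if a write faults, retry the allocation.
 */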
struct page *arch_validate(struct page *page, gfp_t mask, int order)
{
	unsigned long addr, zero = 0;
	int i;

 again:
	if (page == NULL)
		return page;
	if (PageHighMem(page))
		return page;

	addr = (unsigned long) page_address(page);
	for (i = 0; i < (1 << order); i++) {
		current->thread.fault_addr = (void *) addr;
		if (__do_copy_to_user((void __user *) addr, &zero,
				      sizeof(zero),
				      &current->thread.fault_addr,
				      &current->thread.fault_catcher)) {
			if (!(mask & __GFP_WAIT))
				return NULL;
			else break;
		}
		addr += PAGE_SIZE;
	}

	if (i == (1 << order))
		return page;
	page = alloc_pages(mask, order);
	goto again;
}
/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif
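/* Walk mem_map and print a summary of how pages are currently used. */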
void show_mem(void)
{
	int pfn, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int high_mem = 0;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
	       nr_swap_pages<<(PAGE_SHIFT-10));
	pfn = max_mapnr;
	while (pfn-- > 0) {
		page = pfn_to_page(pfn);
		total++;
		if (PageHighMem(page))
			high_mem++;
		if (PageReserved(page))
			reserved++;
		else if (PageSwapCache(page))
			cached++;
		else if (page_count(page))
			shared += page_count(page) - 1;
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", high_mem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);
}
/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}
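/*
 * pte_alloc_one_kernel() returns a bare zeroed page for kernel page tables;
 * pte_alloc_one() returns a struct page and runs the pgtable constructor so
 * it can back user page tables.
 */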
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE);
	return pmd;
}
#endif
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}