/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;
        printk(KERN_INFO "Mem-info:\n");
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}
/* References to section boundaries */

int after_bootmem;

static void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}
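/*
 * Map a single page: install a pte for 'vaddr' pointing at physical
 * address 'phys', allocating any missing intermediate page tables with
 * spp_getpage().  Used by __set_fixmap() below.
 */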
static void set_pte_phys(unsigned long vaddr,
                         unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};
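/*
 * Hand out one page at table_end for early page-table construction and map
 * it through a free temp_mappings[] slot so it can be written before the
 * direct mapping exists.  The caller releases the slot again with
 * unmap_low_page(*index).
 */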
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}
static __init void unmap_low_page(int i)
{
        struct temp_map *ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}
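/*
 * Fill in the direct mapping at the PUD level for [address, end): each PUD
 * entry gets a freshly allocated PMD page populated with 2MB (_PAGE_PSE)
 * mappings; ranges not present in the e820 map are left unmapped.
 */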
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i, j;

        i = pud_index(address);
        pud = pud + i;
        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = address + i*PUD_SIZE;
                if (paddr >= end) {
                        for (; i < PTRS_PER_PUD; i++, pud++)
                                set_pud(pud, __pud(0));
                        break;
                }
                if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
                        unsigned long pe;

                        if (paddr >= end) {
                                for (; j < PTRS_PER_PMD; j++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                                break;
                        }
                        pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
                        pe &= __supported_pte_mask;
                        set_pmd(pmd, __pmd(pe));
                }
                unmap_low_page(map);
        }
        __flush_tlb();
}
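/*
 * Estimate how much memory the direct-mapping page tables will need (one
 * pud_t per PUD_SIZE and one pmd_t per PMD_SIZE of address space) and pick
 * an e820 region for them, recorded in table_start/table_end.
 */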
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* Put page tables beyond the DMA zones if possible.
           RED-PEN: it might be better to spread them out more over
           memory to avoid hotspots. */
        if (end > MAX_DMA32_PFN << PAGE_SHIFT)
                start = MAX_DMA32_PFN << PAGE_SHIFT;
        else if (end > MAX_DMA_PFN << PAGE_SHIFT)
                start = MAX_DMA_PFN << PAGE_SHIFT;
        else
                start = 0x8000;

        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                table_start = find_e820_area(0x8000, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
}
/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the memory
         * mapped. Unfortunately this is done currently before the nodes are
         * discovered.
         */
        find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pud_t *pud = alloc_low_page(&map, &pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
                     table_start << PAGE_SHIFT,
                     table_end << PAGE_SHIFT);
}
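/*
 * Drop the boot-time identity mapping of low memory once a CPU is running
 * on the final kernel page tables: the boot CPU clears pgd entry 0, APs
 * simply reload cr3 with init_level4_pgt.
 */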
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing a local flush tlb all.
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}
/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = start_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too. */
        h[ZONE_DMA] += dma_reserve;
        if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                printk(KERN_WARNING
                        "Kernel too large and filling up ZONE_DMA?\n");
                h[ZONE_DMA] = z[ZONE_DMA];
        }
}
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;
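/*
 * Late memory setup: release bootmem to the page allocator, account
 * reserved pages, and register the /proc/kcore memory areas.
 */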
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
        no_iommu_init();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup.
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
}
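/*
 * Free the __init text/data back to the page allocator and poison it to
 * catch any late references.
 */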
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
        printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}
#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)&__start_rodata;

        for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
               (&__end_rodata - &__start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < (unsigned long)&_end)
                return;
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif
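/*
 * Reserve a physical range with the bootmem allocator (per-node when
 * CONFIG_NUMA is enabled) and account it in dma_reserve when it lies below
 * MAX_DMA_PFN, so size_zones() can subtract it from ZONE_DMA.
 */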
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}
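/*
 * Report whether a kernel virtual address is backed by a present mapping,
 * walking pgd/pud/pmd/pte and handling 2MB (PSE) mappings at the pmd level.
 */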
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}
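/* x86-64 specific debug sysctls, registered under /proc/sys/debug. */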
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);
        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
/* Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}