/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;
	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}
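/*
 * Allocate a zeroed page for a new page table level: from the page
 * allocator once it is up, from bootmem before that.
 */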
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
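/*
 * Install a single kernel mapping of @phys at @vaddr with protection
 * @prot, allocating the intermediate page table levels as needed.
 */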
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
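/*
 * Page-frame range used to carve out the early direct-mapping page
 * tables, plus two temporary 2MB windows (PMDs provided by head.S,
 * at the 40MB and 42MB virtual marks) through which those pages are
 * reached before the direct mapping itself exists.
 */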
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};
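/*
 * Hand out the next page at table_end through a free temporary
 * mapping (or straight from the page allocator once bootmem is up);
 * *index records which mapping was used so unmap_low_page() can
 * release it.
 */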
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}
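/* Release a temporary mapping handed out by alloc_low_page(). */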
static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}
/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) !=
	    temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}
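/*
 * Map [address, end) with 2MB pages at the PMD level. At boot, the
 * entries past the end of memory are explicitly zeroed.
 */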
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
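/*
 * Extend an already-instantiated PMD page under init_mm's
 * page_table_lock (taken on the memory hotplug path).
 */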
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
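/*
 * Populate a PUD page: allocate a PMD page per 1GB entry and fill it
 * with 2MB mappings. Ranges with no e820-backed memory are skipped
 * during boot; already-populated entries are extended in place.
 */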
static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		int map;
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}
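/*
 * Size the worst case for the direct-mapping page tables (one PUD
 * page plus one PMD page per mapped unit) and grab a free e820 area
 * to hold them.
 */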
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}
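/*
 * Drop the low identity mappings left over from head.S. The boot CPU
 * clears its pgd entry; APs just switch cr3 to the clean
 * init_level4_pgt.
 */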
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}
#ifndef CONFIG_NUMA
/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif /* !CONFIG_NUMA */
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen
			   currently. */
			printk(KERN_ERR
	"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
 *	via probe interface of sysfs. If acpi notifies hot-add event, then it
 *	can tell node id by searching dsdt. But, probe interface doesn't have
 *	node id. So, return 0 as node id at this time.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size - 1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1,
	       (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
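/*
 * Give the init pages in [begin, end) back to the page allocator,
 * poisoning them first so that stale references are caught.
 */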
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}
void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
		__initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
		(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}
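/*
 * Walk the kernel page tables to check whether @addr is a valid,
 * mapped kernel virtual address; 2MB mappings are handled at the
 * PMD level.
 */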
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}