/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;
static __init void *alloc_low_page(void)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");
        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        return adr;
}
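/*
 * Note: alloc_low_page() is a simple bump allocator over the physically
 * contiguous window [table_start, table_top) that find_early_table_space()
 * reserves below: each call hands out (and zeroes) the next pfn. There is
 * no free path, which is fine because early page tables are never released.
 */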
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
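/*
 * Worked example (non-PAE i386, 4 kB pages): PGDIR_SHIFT is 22, so for
 * vaddr == 0xc0100000 pgd_index() yields 768; with the pmd level folded,
 * PTRS_PER_PMD == 1 and pmd_index() is always 0. Each inner-loop pass
 * then advances vaddr by PMD_SIZE (4 MB here, 2 MB with PAE) until the
 * requested end address is reached.
 */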
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;
        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S.
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
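        /*
         * Concretely: pass 1 writes every entry with the conservative
         * PTE_IDENT_ATTR bits inherited from head_32.S and then flushes
         * the TLB; only pass 2 rewrites the entries with the final
         * NX/GLOBAL/RO attributes. A single write changing both the page
         * size and the attributes could otherwise leave stale TLB entries.
         */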
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}
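/*
 * In effect: the first megabyte (pfns 0-256) and everything that is not
 * kernel RAM (PCI MMIO windows, BIOS/ACPI regions) stay readable through
 * /dev/mem, while ordinary RAM above 1 MB is refused.
 */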
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}
struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};
static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}
void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}
#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
static inline void set_highmem_pages_init(void)
{
}
#endif /* CONFIG_HIGHMEM */
void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}
void __init native_pagetable_setup_done(pgd_t *base)
{
}
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}
#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */
void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}
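/*
 * Note on the PAE branch above: the low pgd slots are not simply zeroed,
 * they are pointed at empty_zero_page with just the present bit (the
 * literal 1) set, so the top-level entries remain valid for the hardware.
 */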
pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);
#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
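/*
 * Background for set_nx(): CPUID leaf 0x80000001, EDX bit 20 advertises
 * Execute Disable support, and EFER_NX in the EFER MSR actually enables
 * it. Only then may _PAGE_NX appear in ptes, which is why the bit is also
 * added back to __supported_pte_mask here.
 */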
/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);
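/*
 * Example: booting with "highmem=512M" makes memparse() return 512 << 20,
 * so highmem_pages becomes (512 << 20) >> PAGE_SHIFT == 131072 pages with
 * 4 kB pages.
 */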
/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem pages "
                                "available, ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning only 4GB will be used."
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!.\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB results in "
                                        "smaller than 64MB lowmem, ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}
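/*
 * On a typical 32-bit box this yields ZONE_DMA up to the 16 MB ISA DMA
 * limit, ZONE_NORMAL up to max_low_pfn (at most roughly 896 MB of lowmem)
 * and, with CONFIG_HIGHMEM, ZONE_HIGHMEM for everything above that.
 */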
void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                 bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}
static void __init find_early_table_space(unsigned long end, int use_pse)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_SIZE * 2;

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}
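/*
 * Sizing example (non-PAE, 1 GB of lowmem, PSE in use): the pud and pmd
 * levels are folded, so the puds and pmds terms come to one 4 kB page
 * each after PAGE_ALIGN(); ptes are only needed for the unaligned tail
 * plus PMD_SIZE of slack, and two more pages go to the fixmap, for a
 * total on the order of 24 kB.
 */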
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        int use_pse = 0;
#else
        int use_pse = cpu_has_pse;
#endif

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end, use_pse);
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }
        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big page alignment ? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                             use_pse);

        /* tail is not big page alignment ? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                     end_pfn, 0);
        }
        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                              table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}
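/*
 * The test maps swapper_pg_dir read-only at FIX_WP_TEST and has
 * do_test_wp_bit() (at the bottom of this file) attempt a supervisor-mode
 * write to it: on CPUs that honour WP the write faults, and the exception
 * fixup path reports that the bit works.
 */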
static struct kcore_list kcore_mem, kcore_vmalloc;
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif /* CONFIG_DEBUG_RODATA */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}
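/*
 * The POISON_FREE_INITMEM pattern that free_init_pages() writes is 0xcc,
 * the int3 breakpoint opcode on x86, so a stale jump into freed
 * .init.text traps immediately instead of executing garbage.
 */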
void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}