x86: use WARN_ONCE in workaround for mtrr mask
/*
 *  linux/arch/x86/mm/init_32.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

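/*
 * Physical window reserved by find_early_table_space() for the early
 * kernel page tables: alloc_low_page() hands out zeroed pages from
 * table_end upwards until table_top is hit.
 */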
static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

/* Set once the bootmem allocator is up and can be used for page tables. */
static int __initdata after_init_bootmem;

static __init void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys  = pfn * PAGE_SIZE;
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given page global directory (pgd) entry. This only returns the
 * pgd entry in non-PAE compilation mode, since the middle layer
 * is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        unsigned long phys;
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page(&phys);
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
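                        /*
                         * Under CONFIG_DEBUG_PAGEALLOC, try a normal
                         * bootmem page first (allocated above
                         * MAX_DMA_ADDRESS) and fall back to a low page
                         * only if that fails, presumably to avoid
                         * draining low memory for pte pages.
                         */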
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table = (pte_t *)
                                        alloc_bootmem_low_pages(PAGE_SIZE);
                } else {
                        unsigned long phys;
                        page_table = (pte_t *)alloc_low_page(&phys);
                }

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m = 0, pages_4k = 0;

        if (!cpu_has_pse)
                use_pse = 0;

        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

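                                /*
                                 * addr2 is the last byte this big page
                                 * would cover; if the kernel text falls
                                 * anywhere inside [addr, addr2], the
                                 * mapping must stay executable.
                                 */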
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        update_page_count(PG_LEVEL_2M, pages_2m);
        update_page_count(PG_LEVEL_4K, pages_4k);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                         unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(void)
{
        add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

        totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()                            do { } while (0)
# define permanent_kmaps_init(pgd_base)         do { } while (0)
# define set_highmem_pages_init()               do { } while (0)
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
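                /*
                 * Keep the entry present but pointing at the all-zero
                 * page (the literal 1 is _PAGE_PRESENT): with PAE the
                 * four top-level entries are cached when %cr3 is
                 * loaded, so a present dummy entry is used rather
                 * than a cleared one.
                 */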
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

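/*
 * Check CPUID leaf 0x80000001: EDX bit 20 is the NX feature flag.
 * Unless "noexec=off" was given, enable EFER.NX so that _PAGE_NX is
 * honoured by the hardware.
 */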
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* note: this function may lower max_pfn */

        /* max_low_pfn is still 0 here; early_res handles reservations */

        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                if (highmem_pages == -1)
                        highmem_pages = max_pfn - MAXMEM_PFN;
                if (highmem_pages + MAXMEM_PFN < max_pfn)
                        max_pfn = MAXMEM_PFN + highmem_pages;
                if (highmem_pages + MAXMEM_PFN > max_pfn) {
                        printk(KERN_WARNING "only %luMB highmem available, "
                                "ignoring highmem size of %uMB.\n",
                                pages_to_mb(max_pfn - MAXMEM_PFN),
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning: only %ldMB will be used.\n",
                                        MAXMEM>>20);
                if (max_pfn > MAX_NONPAE_PFN)
                        printk(KERN_WARNING
                                 "Use a HIGHMEM64G enabled kernel.\n");
                else
                        printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
                max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
                if (max_pfn > MAX_NONPAE_PFN) {
                        max_pfn = MAX_NONPAE_PFN;
                        printk(KERN_WARNING "Warning: only 4GB will be used. "
                                "Use a HIGHMEM64G enabled kernel.\n");
                }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
        } else {
                if (highmem_pages == -1)
                        highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
                if (highmem_pages >= max_pfn) {
                        printk(KERN_ERR "highmem size specified (%uMB) is "
                                "bigger than pages available (%luMB)!\n",
                                pages_to_mb(highmem_pages),
                                pages_to_mb(max_pfn));
                        highmem_pages = 0;
                }
                if (highmem_pages) {
                        if (max_low_pfn - highmem_pages <
                            64*1024*1024/PAGE_SIZE) {
                                printk(KERN_ERR "highmem size %uMB results in "
                                        "smaller than 64MB lowmem, ignoring it.\n",
                                        pages_to_mb(highmem_pages));
                                highmem_pages = 0;
                        }
                        max_low_pfn -= highmem_pages;
                }
#else
                if (highmem_pages)
                        printk(KERN_ERR "ignoring highmem size on non-highmem"
                                        " kernel!\n");
#endif
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                                  unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
        int i;
        unsigned long bootmap_size, bootmap;

        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: %08lx - %08lx\n",
                 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  bootmap %08lx - %08lx\n",
                 bootmap, bootmap + bootmap_size);
        for_each_online_node(i)
                free_bootmem_with_active_regions(i, max_low_pfn);
        early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

        after_init_bootmem = 1;
}

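/*
 * Make a worst-case estimate of the page-table pages needed to map
 * physical memory up to 'end', then reserve that much room from e820
 * (starting the search at 0x7000) for alloc_low_page() to carve
 * pages out of.
 */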
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = PAGE_ALIGN(puds * sizeof(pud_t));

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += PAGE_ALIGN(pmds * sizeof(pmd_t));

        if (cpu_has_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += PAGE_ALIGN(ptes * sizeof(pte_t));

        /* for fixmap */
        tables += PAGE_SIZE * 2;

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

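/*
 * Set up the kernel direct mapping for [start, end).  The range is
 * split three ways: a head and a tail that are not big-page aligned
 * are mapped with 4k pages, while the aligned middle is mapped with
 * 2/4MB pages when PSE is available.
 */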
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                                unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long start_pfn, end_pfn;
        unsigned long big_page_start;

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end);

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        big_page_start = PMD_SIZE;

        if (start < big_page_start) {
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
        } else {
                /* head is not big-page aligned? */
                start_pfn = start >> PAGE_SHIFT;
                end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        }
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

        /* big page range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < (big_page_start >> PAGE_SHIFT))
                start_pfn = big_page_start >> PAGE_SHIFT;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn)
                kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
                                                cpu_has_pse);

        /* tail is not big-page aligned? */
        start_pfn = end_pfn;
        if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
                end_pfn = end >> PAGE_SHIFT;
                if (start_pfn < end_pfn)
                        kernel_physical_mapping_init(pgd_base, start_pfn,
                                                         end_pfn, 0);
        }

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s. All 586+ CPUs are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE        > FIXADDR_START);
        BUG_ON(VMALLOC_END                              > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                            > VMALLOC_END);
        BUG_ON((unsigned long)high_memory               > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        cpa_init();
        save_pg_dir();
        zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
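/* On 32-bit, hot-added memory is always onlined into ZONE_HIGHMEM. */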
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

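        /*
         * flag starts out as 1 (the "2" constraint ties it to the
         * input value below).  The second movb writes back to the
         * read-only fixmap page: if WP is honoured in supervisor mode
         * that write faults and the exception fixup jumps straight to
         * label 2, leaving flag == 1.  If the write silently succeeds,
         * the xorl clears flag to 0.
         */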
        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        unsigned long addr;

        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make it
         * writable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

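/* On 32-bit this is a plain passthrough to the bootmem allocator. */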
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}