/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
                                = 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */

int after_bootmem;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/*
 * noexec=on|off
 * Control non-executable mappings for 64-bit processes.
 *
 * on   Enable (default)
 * off  Disable
 */
static int __init nonx_setup(char *str)
{
        if (!str)
                return -EINVAL;
        if (!strncmp(str, "on", 2)) {
                __supported_pte_mask |= _PAGE_NX;
                do_not_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
                do_not_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 0;
}
early_param("noexec", nonx_setup);

void __cpuinit check_efer(void)
{
        unsigned long efer;

        rdmsrl(MSR_EFER, efer);
        if (!(efer & EFER_NX) || do_not_nx)
                __supported_pte_mask &= ~_PAGE_NX;
}

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack as well, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

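/*
 * Install a single kernel pte for 'vaddr' below the given pud page,
 * allocating the intermediate pmd/pte pages with spp_getpage() when they
 * are missing.
 */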
void
set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                                pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
                        return;
                }
        }

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) && pte_val(new_pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void
set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud_page = (pud_t *)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                                pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;

        for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

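/*
 * Allocate one zeroed page for early page-table construction.  Before
 * bootmem is up, pages come from the table_start..table_top window found
 * by find_early_table_space() and are temporarily mapped with
 * early_ioremap(); afterwards a normal GFP_ATOMIC page is used.
 */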
static __ref void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __ref void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap(adr, PAGE_SIZE);
}

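/*
 * Create 4k pte entries for the physical range [addr, end) within one pte
 * page.  Entries that are already populated are left untouched.  Returns
 * the end of the last physical address actually mapped.
 */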
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned pages = 0;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

                if (addr >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PTE; i++, pte++)
                                        set_pte(pte, __pte(0));
                        }
                        break;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen, for example, has some special requirements, like
                 * mapping pagetable pages as RO. So assume that whoever
                 * pre-set up these mappings knew what they were doing.
                 */
                if (pte_val(*pte))
                        continue;

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
                pgprot_t prot)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

        return phys_pte_init(pte, address, end, prot);
}

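/*
 * Create pmd-level mappings for the physical range [address, end).  Uses
 * 2M pages when allowed by page_size_mask, otherwise builds pte pages via
 * phys_pte_init().  Returns the end of the last physical address mapped.
 */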
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                if (address >= end) {
                        if (!after_bootmem) {
                                for (; i < PTRS_PER_PMD; i++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                        }
                        break;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                last_map_addr = phys_pte_update(pmd, address,
                                                                end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping
                         * but use the same existing protection bits, except
                         * for the large page bit, so that we don't violate
                         * Intel's TLB Application note (317080) which says
                         * that, while changing the page sizes, new and old
                         * translations should not differ with respect to
                         * page frame and attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M))
                                continue;
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                        continue;
                }

                pte = alloc_low_page(&pte_phys);
                last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
                unsigned long page_size_mask, pgprot_t prot)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;

        last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
        __flush_tlb_all();
        return last_map_addr;
}

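/*
 * Create pud-level mappings for the physical range [addr, end).  Uses 1G
 * pages when allowed by page_size_mask, otherwise descends into
 * phys_pmd_init() for each pud entry.  Returns the end of the last
 * physical address mapped.
 */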
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         unsigned long page_size_mask)
{
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                if (addr >= end)
                        break;

                if (!after_bootmem &&
                                !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                last_map_addr = phys_pmd_update(pud, addr, end,
                                                         page_size_mask, prot);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but
                         * use the same existing protection bits, except for
                         * the large page bit, so that we don't violate
                         * Intel's TLB Application note (317080) which says
                         * that, while changing the page sizes, new and old
                         * translations should not differ with respect to
                         * page frame and attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G))
                                continue;
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);
                unmap_low_page(pmd);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
                 unsigned long page_size_mask)
{
        pud_t *pud;

        pud = (pud_t *)pgd_page_vaddr(*pgd);

        return phys_pud_init(pud, addr, end, page_size_mask);
}

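/*
 * Estimate how much memory the direct-mapping page tables will need to map
 * everything up to 'end', and reserve a physical window for them in the
 * e820 map (table_start..table_top).
 */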
static void __init find_early_table_space(unsigned long end, int use_pse,
                                          int use_gbpages)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
        if (use_gbpages) {
                unsigned long extra;
                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

        if (use_pse) {
                unsigned long extra;
                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables >> PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}

static void __init init_gbpages(void)
{
        if (direct_gbpages && cpu_has_gbpages)
                printk(KERN_INFO "Using GB pages for direct mapping\n");
        else
                direct_gbpages = 0;
}

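/*
 * Build the kernel direct mapping for the physical range [start, end),
 * one pgd entry at a time.  Returns the end of the last physical address
 * mapped.
 */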
static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                                                unsigned long end,
                                                unsigned long page_size_mask)
{

        unsigned long next, last_map_addr = end;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;

                if (pgd_val(*pgd)) {
                        last_map_addr = phys_pud_update(pgd, __pa(start),
                                                 __pa(end), page_size_mask);
                        continue;
                }

                pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        return last_map_addr;
}

struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

#define NR_RANGE_MR 5

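/*
 * Record a pfn range and its page-size mask in the map_range array, if the
 * range is not empty.  Returns the updated number of ranges.
 */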
static int save_mr(struct map_range *mr, int nr_range,
                   unsigned long start_pfn, unsigned long end_pfn,
                   unsigned long page_size_mask)
{

        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
                mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
                mr[nr_range].page_size_mask = page_size_mask;
                nr_range++;
        }

        return nr_range;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        unsigned long last_map_addr = 0;
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;

        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
        int use_pse, use_gbpages;

        printk(KERN_INFO "init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                init_gbpages();

#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        use_pse = use_gbpages = 0;
#else
        use_pse = cpu_has_pse;
        use_gbpages = direct_gbpages;
#endif

        if (use_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;
        if (use_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;

        memset(mr, 0, sizeof(mr));
        nr_range = 0;

        /* head if not big page alignment? */
        start_pfn = start >> PAGE_SHIFT;
        end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* big page (2M) range */
        start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
        if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
                end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                        page_size_mask & (1<<PG_LEVEL_2M));

        /* big page (1G) range */
        start_pfn = end_pfn;
        end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

        /* tail is not big page (1G) alignment */
        start_pfn = end_pfn;
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                        page_size_mask & (1<<PG_LEVEL_2M));

        /* tail is not big page (2M) alignment */
        start_pfn = end_pfn;
        end_pfn = end>>PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* try to merge adjacent ranges that use the same page size */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
                if (mr[i].end != mr[i+1].start ||
                    mr[i].page_size_mask != mr[i+1].page_size_mask)
                        continue;
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
                         (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i].start = old_start;
                nr_range--;
        }

        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " %010lx - %010lx page %s\n",
                                mr[i].start, mr[i].end,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

        if (!after_bootmem)
                find_early_table_space(end, use_pse, use_gbpages);

        for (i = 0; i < nr_range; i++)
                last_map_addr = kernel_physical_mapping_init(
                                        mr[i].start, mr[i].end,
                                        mr[i].page_size_mask);

        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();

        if (!after_bootmem && table_end > table_start)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");

        printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
                         last_map_addr, end);

        if (!after_bootmem)
                early_memtest(start, end);

        return last_map_addr >> PAGE_SHIFT;
}

#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         0, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        memory_present(0, 0, max_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        last_mapped_pfn = init_memory_mapping(start, start + size-1);
        if (last_mapped_pfn > max_pfn_mapped)
                max_pfn_mapped = last_mapped_pfn;

        ret = __add_pages(zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);      /* warn only if __add_pages() failed */

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

        pci_iommu_alloc();

        /* clear_bss() has already cleared empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = max_pfn - totalram_pages -
                                        absent_pages_in_range(0, max_pfn);
        after_bootmem = 1;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                                 VSYSCALL_END - VSYSCALL_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                                "%ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}

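/*
 * Free (or, with CONFIG_DEBUG_PAGEALLOC, just unmap) the init-section
 * pages in the virtual range [begin, end).
 */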
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = begin;

        if (addr >= end)
                return;

        /*
         * If debugging page accesses, do not free this memory but
         * mark it not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        start = rodata_start;
#endif

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        /*
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
#ifdef CONFIG_NUMA
        int nid, next_nid;
        int ret;
#endif
        unsigned long pfn = phys >> PAGE_SHIFT;

        if (pfn >= max_pfn) {
                /*
                 * This can happen with kdump kernels when accessing
                 * firmware tables:
                 */
                if (pfn < max_pfn_mapped)
                        return -EFAULT;

                printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
                                phys, len);
                return -EFAULT;
        }

        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        nid = phys_to_nid(phys);
        next_nid = phys_to_nid(phys + len - 1);
        if (nid == next_nid)
                ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
        else
                ret = reserve_bootmem(phys, len, flags);

        if (ret != 0)
                return ret;

#else
        reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
        }

        return 0;
}

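/*
 * Walk the kernel page tables and report whether 'addr' is backed by a
 * present mapping.  Non-canonical or unmapped addresses return 0.
 */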
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

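/*
 * Allocate and map backing memory for the struct page array covering
 * 'size' pages starting at 'start_page', using 2M PMD mappings when the
 * CPU supports PSE and falling back to 4k ptes otherwise.
 */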
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                void *p = NULL;

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = vmemmap_pmd_populate(pud, addr, node);

                        if (!pmd)
                                return -ENOMEM;

                        p = vmemmap_pte_populate(pmd, addr, node);

                        if (!p)
                                return -ENOMEM;

                        addr_end = addr + PAGE_SIZE;
                        p_end = p + PAGE_SIZE;
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd)) {
                                pte_t entry;

                                p = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                        } else
                                vmemmap_verify((pte_t *)pmd, node, addr, next);
                }

        }
        return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif