/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page and page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
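/*
 * Illustration only: with a populated virtual memory map, the generic
 * SPARSEMEM_VMEMMAP variants of these primitives reduce to pointer
 * arithmetic against the vmemmap base, roughly as in
 * include/asm-generic/memory_model.h:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * No page table walk or memory access is needed at lookup time; the
 * cost is paid once, when the vmemmap range is populated below.
 */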
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}
pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
int __meminit vmemmap_populate_basepages(struct page *start_page,
						unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
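/*
 * Illustration only (not part of this file): an architecture that maps
 * its vmemmap with base pages can provide the required
 * vmemmap_populate() hook simply by delegating to the helper above.
 * A minimal sketch, assuming no large-page optimization:
 *
 *	int __meminit vmemmap_populate(struct page *start_page,
 *					unsigned long size, int node)
 *	{
 *		return vmemmap_populate_basepages(start_page, size, node);
 *	}
 *
 * Architectures that can back the vmemmap with huge mappings typically
 * implement their own walker instead and fall back to PTE-level
 * population only where a large mapping cannot be used.
 */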
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
	if (error)
		return NULL;

	return map;
}
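/*
 * Usage note: the sparse memory model initialization code calls
 * sparse_mem_map_populate() once per memory section that needs a
 * mem_map; a NULL return means the section's virtual memory map could
 * not be backed.
 */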