/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
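/*
 * Illustration with hypothetical numbers (not taken from this file):
 * under SPARSEMEM_EXTREME with, say, SECTIONS_PER_ROOT == 256, section
 * number 1000 decomposes as
 *
 *	root   = SECTION_NR_TO_ROOT(1000);	1000 / 256 == 3
 *	offset = 1000 & SECTION_ROOT_MASK;	1000 % 256 == 232
 *
 * so the section lives at &mem_section[3][232], with root 3 allocated
 * on demand.  In the flat case SECTIONS_PER_ROOT == 1, so every section
 * is its own root.
 */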
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
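/*
 * A minimal sketch of the expected return values (inferred from the
 * logic above, not an authoritative contract): the first call for a
 * root allocates it, later calls are harmless no-ops reporting -EEXIST.
 *
 *	ret = sparse_index_init(section_nr, nid);	first call: 0
 *	ret = sparse_index_init(section_nr, nid);	again: -EEXIST
 *
 * Callers such as sparse_add_one_section() therefore treat -EEXIST as
 * success.
 */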
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
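/*
 * Illustrative property (follows from the loop above): __section_nr()
 * inverts __nr_to_section() for any allocated section, e.g.
 *
 *	struct mem_section *ms = __nr_to_section(pnum);
 *	BUG_ON(__section_nr(ms) != pnum);
 *
 * Note that the walk over NR_SECTION_ROOTS is a linear search, so this
 * is only suitable for slow paths.
 */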
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
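/*
 * Sketch of the encoding (shift value assumed for illustration): with
 * SECTION_NID_SHIFT == 2, node 5 is stored as 5 << 2 == 0x14, leaving
 * the low bits free for flags such as SECTION_MARKED_PRESENT:
 *
 *	ms->section_mem_map = sparse_encode_early_nid(5)
 *				| SECTION_MARKED_PRESENT;
 *	nid = sparse_early_nid(ms);	nid == 5, flag bits shifted out
 */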
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
						SECTION_MARKED_PRESENT;
	}
}
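/*
 * Typical boot-time usage, sketched from how arch code drives this file
 * (helper names are illustrative, not a fixed interface): each
 * architecture reports its populated pfn ranges, then lets
 * sparse_init() allocate the mem_maps.
 *
 *	for_each_online_node(nid) {
 *		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 *		memory_present(nid, start_pfn, end_pfn);
 *	}
 *	sparse_init();
 */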
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
					    unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
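/*
 * Worked example with assumed numbers (both values vary by config): if
 * a node spans two present sections of PAGES_PER_SECTION == 4096 pages
 * each and sizeof(struct page) == 32, the node's mem_map costs
 *
 *	2 * 4096 * 32 == 262144 bytes (256 KiB)
 *
 * which is what node_memmap_size_bytes() reports.
 */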
/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
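/*
 * Round-trip sketch (illustrative): encoding subtracts the section's
 * first pfn, so decoding recovers the original pointer.
 *
 *	coded = sparse_encode_mem_map(map, pnum);
 *	map2  = sparse_decode_mem_map(coded, pnum);	map2 == map
 *
 * This is what lets pfn_to_page() compute
 * "(struct page *)coded_mem_map + pfn" directly, without first
 * subtracting the section's starting pfn.
 */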
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
						SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}
static unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
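/*
 * Worked example with an assumed SECTION_BLOCKFLAGS_BITS of 100 on a
 * 64-bit build: roundup(100, 8) / 8 == 13 bytes, rounded up again to a
 * multiple of sizeof(unsigned long) gives 16 bytes per section usemap.
 */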
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;
		usemap = sparse_early_usemap_alloc(pnum);
		if (!usemap)
			continue;
		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}
}
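/*
 * After sparse_init(), a present section's mem_map is reachable purely
 * through the encoded section_mem_map.  A simplified sketch of the
 * lookup that pfn_to_page() performs on SPARSEMEM (flag bits masked
 * off, error checking omitted):
 *
 *	ms   = __pfn_to_section(pfn);
 *	map  = (struct page *)(ms->section_mem_map & SECTION_MAP_MASK);
 *	page = map + pfn;
 */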
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
#else /* !CONFIG_SPARSEMEM_VMEMMAP */
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	/* Prefer physically contiguous pages, fall back to vmalloc. */
	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
	else
		ret = vmalloc(memmap_size);
	if (!ret)
		return NULL;

	memset(ret, 0, memmap_size);
	return ret;
}
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it may kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
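/*
 * Sketch of the hotplug call path (simplified; the caller is the memory
 * hotplug code, e.g. __add_section() in mm/memory_hotplug.c):
 *
 *	ret = sparse_add_one_section(zone, start_pfn, nr_pages);
 *	if (ret <= 0)
 *		the memmap was not consumed; the caller must back out
 *
 * A return of 1 means the section's mem_map and usemap are now live.
 */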