/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];
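
/*
 * memnode backs the physical-address-to-node lookup used by phys_to_nid():
 * memnodemap[] is a byte array indexed by (addr >> memnode_shift) whose
 * entries hold the owning node id, with 0xff meaning "no node".  Small maps
 * fit in memnode.embedded_map; larger ones are allocated from e820 memory
 * by allocate_cachealigned_memnodemap() below.
 */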
struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
	int i;
	int res = -1;
	unsigned long addr, end;

	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
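
/*
 * Allocate the memnodemap[] hash table itself.  A small map can use the
 * space embedded in struct memnode; anything bigger gets a cache-aligned
 * region carved out of usable e820 memory.
 */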
static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48)
		return 0;

	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
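/*
 * Example: node start addresses 0 and 0x80000000 OR together to 0x80000000,
 * whose lowest set bit is 31, so a shift of 31 (2GB granularity) is enough
 * to give each node its own memnodemap[] slot.
 */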
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}
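
/*
 * Pick the hash shift for the given node layout, size and allocate
 * memnodemap[], and populate it.  Returns the shift on success or -1 if the
 * layout cannot be represented.
 */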
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO
		       "Your memory is not aligned you need to rebuild your kernel "
		       "with a bigger NODEMAPSIZE shift=%d\n", shift);
		return -1;
	}
	return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif
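
/*
 * Allocate "size" bytes for node "nodeid": prefer free e820 memory inside
 * the node's own [start, end) range, and fall back to generic bootmem above
 * MAX_DMA_ADDRESS if that fails.
 */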
static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
	       unsigned long size)
{
	unsigned long mem = find_e820_area(start, end, size);
	void *ptr;

	if (mem != -1L)
		return __va(mem);
	ptr = __alloc_bootmem_nopanic(size,
				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
	unsigned long nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nodeid, end);
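
	/* Keep the node's own pg_data_t and the bootmem bitmap reserved. */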
	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/* Try to allocate mem_map at end to not fill up precious <4GB
	   memory. */
	memmapsize = sizeof(struct page) * (end_pfn - start_pfn);
	limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				memmapsize, SMP_CACHE_BYTES,
				round_down(limit - memmapsize, PAGE_SIZE),
				limit);
#endif
}

void __init numa_init_array(void)
{
	int rr, i;

	/* There are unfortunately some poorly designed mainboards around
	   that only connect memory to a single CPU. This breaks the 1:1
	   cpu->node mapping. To avoid this, fill in the mapping for all
	   possible CPUs, as the number of CPUs is not known yet.
	   We round robin the existing nodes. */
	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
#define E820_ADDR_HOLE_SIZE(start, end)					\
	(e820_hole_size((start) >> PAGE_SHIFT, (end) >> PAGE_SHIFT) <<	\
	 PAGE_SHIFT)
char *cmdline __initdata;
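
/*
 * cmdline holds the argument of the numa=fake= boot option (set in
 * numa_setup() below): either a plain node count such as "4", or a list
 * such as "2*512,1024" giving node sizes in megabytes with optional
 * repeat coefficients.
 */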

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is 0
 * if there is additional memory left for allocation past addr and -1 otherwise.
 * addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}

/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
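/*
 * The per-node size is the usable (non-hole) range divided by num_nodes,
 * rounded down to a FAKE_NODE_MIN_SIZE boundary; the rounded-off remainder
 * is handed back out as one extra FAKE_NODE_MIN_SIZE chunk to each of the
 * first "big" nodes, so little memory is lost to rounding.
 */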
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start,
				      int num_nodes)
{
	unsigned int big;
	u64 size;
	int i;

	if (num_nodes <= 0)
		return -1;
	if (num_nodes > MAX_NUMNODES)
		num_nodes = MAX_NUMNODES;
	size = (max_addr - *addr - E820_ADDR_HOLE_SIZE(*addr, max_addr)) /
	       num_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the leftovers.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node. "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;

		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM. Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			while (end - *addr - E820_ADDR_HOLE_SIZE(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;

	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0;
	int coeff_flag;
	int coeff = -1;
	int num = 0;
	u64 size;
	int i;

	memset(&nodes, 0, sizeof(nodes));
	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
						simple_strtol(cmdline, NULL, 0));
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line. */
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line coefficients are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	numa_init_array();
	return 0;
}
#undef E820_ADDR_HOLE_SIZE
#endif /* CONFIG_NUMA_EMU */
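
/*
 * Top-level NUMA setup: try numa=fake emulation first, then the ACPI SRAT
 * table, then the K8 northbridge registers, and finally fall back to a
 * single dummy node covering all of memory.
 */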
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;		/* every address hashes to slot 0 */
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node[cpu] = node;
}

unsigned long __init numa_free_all_bootmem(void)
{
	int i;
	unsigned long pages = 0;

	for_each_online_node(i) {
		pages += free_all_bootmem_node(NODE_DATA(i));
	}
	return pages;
}
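
/*
 * Set the per-zone pfn limits (ZONE_DMA up to the ISA DMA limit, ZONE_DMA32
 * up to the 32-bit DMA limit, ZONE_NORMAL up to end_pfn) and let
 * free_area_init_nodes() build the zones from the registered active regions.
 */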
void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}
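
/*
 * Parse the "numa=" early parameter: "off" disables NUMA, "fake=<layout>"
 * enables emulation, "noacpi" ignores the SRAT, and "hotadd=<percent>" is
 * handed to the ACPI SRAT hotplug code via hotadd_percent.
 */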
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}

early_param("numa", numa_setup);

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid[i];

		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per node page addresses.
 * These are out of line because they are quite big.
 * They could all be tuned by pre-caching more state.
 * Should do that.
 */

int pfn_valid(unsigned long pfn)
{
	unsigned nid;

	if (pfn >= num_physpages)
		return 0;
	nid = pfn_to_nid(pfn);
	if (nid == 0xff)
		return 0;
	return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif