/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
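
/* dbg() output is only produced when booting with "numa=debug"; see
 * early_numa() near the bottom of this file. */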

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = of_get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = of_get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}
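
/*
 * Note: on SMT systems every hardware thread appears in its cpu node's
 * "ibm,ppc-interrupt-server#s" list, which is why all len entries are
 * scanned above rather than just the first.
 */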

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}
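
/*
 * For illustration (values made up): an "ibm,associativity" value of
 * <3, 0, 1, 4> declares three levels of associativity and one domain id
 * per level; of_node_to_nid_single() below uses the entry at index
 * min_common_depth as the Linux node id.
 */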

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
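
/*
 * Worked example: with n = 2 and cells { 0x00000001, 0x00000000 }, the
 * result is 0x100000000 and *buf has been advanced past both cells.
 */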

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
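
/*
 * Example: if lmb_end_of_DRAM() is 0x40000000, a region with
 * start 0x30000000 and size 0x20000000 is trimmed to 0x10000000, and a
 * region starting at or above 0x40000000 is discarded (size 0).
 */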

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const unsigned int *lm, *dm, *aa;
	unsigned int ls, ld, la;
	unsigned int n, aam, aalen;
	unsigned long lmb_size, size, start;
	int nid, default_nid = 0;
	unsigned int ai, flags;

	lm = of_get_property(memory, "ibm,lmb-size", &ls);
	dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
	aa = of_get_property(memory, "ibm,associativity-lookup-arrays", &la);
	if (!lm || !dm || !aa ||
	    ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
	    la < 2 * sizeof(unsigned int))
		return;
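
	/*
	 * Layout, per the PAPR firmware spec (summarized here as a reading
	 * aid): "ibm,dynamic-memory" starts with a count of LMBs, then one
	 * entry per LMB holding n_mem_addr_cells address cells followed by
	 * four more cells (DRC index, a reserved word, the index into the
	 * associativity lookup arrays, and flags).
	 * "ibm,associativity-lookup-arrays" starts with the number of
	 * arrays and the length of each, followed by the arrays themselves.
	 */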

	lmb_size = read_n_cells(n_mem_size_cells, &lm);
	n = *dm++;		/* number of LMBs */
	aam = *aa++;		/* number of associativity lists */
	aalen = *aa++;		/* length of each associativity list */
	if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
	    la < (aam * aalen + 2) * sizeof(unsigned int))
		return;

	for (; n != 0; --n) {
		start = read_n_cells(n_mem_addr_cells, &dm);
		ai = dm[2];
		flags = dm[3];
		dm += 4;
		/* 0x80 == reserved, 0x8 = assigned to us */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		nid = default_nid;
		/* flags & 0x40 means associativity index is invalid */
		if (min_common_depth > 0 && min_common_depth <= aalen &&
		    (flags & 0x40) == 0 && ai < aam) {
			/* this is like of_node_to_nid_single */
			nid = aa[ai * aalen + min_common_depth - 1];
			if (nid == 0xffff || nid >= MAX_NUMNODES)
				nid = default_nid;
		}
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, lmb_size);
		if (!size)
			continue;

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	node_set_online(0);
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}
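
/*
 * The run-length logic above prints contiguous cpus as a range, giving
 * output such as "Node 0 CPUs: 0-3".
 */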

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
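
	/*
	 * The callback above is invoked directly for the boot cpu: the
	 * notifier only fires for cpus brought online after this point.
	 */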

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
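	/*
	 * 64-bit powerpc keeps all of memory in ZONE_DMA, so this single
	 * zone extends to the last pfn of DRAM.
	 */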
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
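
/*
 * Booting with "numa=off" disables NUMA and "numa=debug" enables the
 * dbg() messages; since strstr() is used, both may appear in the same
 * string, e.g. "numa=off,debug".
 */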

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	nodemask_t nodes;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return default_nid;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		/* Domains not present at boot default to 0 */
		if (nid < 0 || !node_online(nid))
			nid = default_nid;

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			goto got_nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;

	/* Temporary code to ensure that returned node is not empty */
got_nid:
	nodes_setall(nodes);
	while (NODE_DATA(nid)->node_spanned_pages == 0) {
		node_clear(nid, nodes);
		nid = any_online_node(nodes);
	}
	return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */