#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;

unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif /* CONFIG_X86_LOCAL_APIC */

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
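
/*
 * Illustrative example (assumes the early_per_cpu() accessor from
 * <asm/percpu.h>): before the per-cpu areas exist, these maps are
 * read through the early accessor,
 *
 *	u16 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 *
 * which falls back to the static boot-time array until
 * setup_per_cpu_maps() below copies the values into the real
 * per-cpu area.
 */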

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif /* CONFIG_NUMA && CONFIG_X86_64 */

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
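
/*
 * Sketch of the resulting state (illustrative; assumes the
 * early_per_cpu() fallback semantics of <asm/percpu.h>): once the
 * early pointers above are NULL,
 *
 *	early_per_cpu(x86_cpu_to_apicid, cpu)
 *
 * resolves to per_cpu(x86_cpu_to_apicid, cpu) rather than to the
 * discarded static array.
 */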

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
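
/*
 * Illustrative usage (assumes the cpumask_of_cpu() wrapper that
 * indexes this map): callers can take the address of a constant
 * single-cpu mask instead of building one on the stack, e.g. for a
 * task p:
 *
 *	set_cpus_allowed_ptr(p, &cpumask_of_cpu(cpu));
 */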
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif /* CONFIG_HAVE_CPUMASK_OF_CPU_MAP */

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */
/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif
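
/*
 * Resulting bootmem layout (descriptive sketch of the code above):
 *
 *	new_cpu_pda -> [ptr 0][ptr 1] ... [ptr nr_cpu_ids-1]	(tsize)
 *	pda         -> [pda 1][pda 2] ... [pda nr_cpu_ids-1]	(asize)
 *
 * The boot CPU keeps its statically allocated pda (slot 0 just points
 * back at it), which is why only nr_cpu_ids - 1 pda slots are carved
 * out of the bootmem array.
 */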

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#else
	nr_cpu_ids = num_processors;
#endif

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA && CONFIG_X86_SMP */
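
/*
 * Call-order note (illustrative; based on the generic boot path):
 * setup_per_cpu_areas() runs once from start_kernel(), so per_cpu()
 * access to another CPU's data is only meaningful after the copy
 * loop above has populated every per-cpu area.
 */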

void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_ioremap(pa_data, PAGE_SIZE);
		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data, pa_data);
			break;
		default:
			break;
		}
#ifndef CONFIG_DEBUG_BOOT_PARAMS
		free_early(pa_data, pa_data+sizeof(*data)+data->len);
#endif
		pa_data = data->next;
		early_iounmap(data, PAGE_SIZE);
	}
}
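
/*
 * Shape of the setup_data chain walked above (descriptive sketch):
 * boot_params.hdr.setup_data holds the physical address of the first
 * struct setup_data; each element links to its successor through
 * ->next, and a next value of 0 terminates the list:
 *
 *	pa_data -> { next, type, len, data[] } -> ... -> 0
 */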

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
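
/*
 * Worked example (illustrative): if node_possible_map contains nodes
 * 0 and 1, the loop above leaves num == 1, so nr_node_ids becomes 2
 * and two cpumask_t slots are allocated; node_to_cpumask_map[1] is
 * then the mask of CPUs on node 1.
 */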

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
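
/*
 * Usage note (illustrative): early_cpu_to_node() is the variant that
 * is safe before the per-cpu areas exist, which is why the allocation
 * loop in setup_per_cpu_areas() above uses
 *
 *	int node = early_cpu_to_node(cpu);
 *
 * where cpu_to_node() would trigger the "usage too early" warning.
 */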

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return &cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	BUG_ON(node >= nr_node_ids);
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */