#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
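
/*
 * A static percpu variable is reached by adding the owning CPU's
 * entry in __per_cpu_offset[] (or, for the local CPU, this_cpu_off
 * held in the %gs/%fs-based segment) to the variable's link-time
 * address.  setup_per_cpu_areas() below fills in these offsets once
 * the first percpu chunk has been allocated.
 */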

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		/* a second distinct online node was found - NUMA matters */
		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Remap allocator
 *
 * This allocator uses PMD page as unit.  A PMD page is allocated for
 * each cpu and each is remapped into vmalloc area using PMD mapping.
 * As PMD page is quite large, only part of it is used for the first
 * chunk.  Unused part is returned to the bootmem allocator.
 *
 * So, the PMD pages are mapped twice - once to the physical mapping
 * and to the vmalloc area for the first percpu chunk.  The double
 * mapping does add one more PMD TLB entry pressure but still is much
 * better than only using 4k mappings while still being NUMA friendly.
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static size_t pcpur_size __initdata;
static void **pcpur_ptrs __initdata;
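
/*
 * Page lookup callback handed to pcpu_setup_first_chunk(): return the
 * page backing @pageno of @cpu's chunk, carved out of the PMD page
 * allocated in setup_pcpu_remap(), or NULL past the used portion.
 */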
static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpur_size)
		return NULL;

	return virt_to_page(pcpur_ptrs[cpu] + off);
}

static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 *
	 * NOTE: disabled for now.
	 */
	if (true || !cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);
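
	/*
	 * Map each CPU's PMD page into the vmalloc area reserved above,
	 * one large-page mapping per CPU.
	 */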
	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;
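
/*
 * Page lookup callback for the 4k allocator: static pages live in the
 * flat pcpu4k_pages[] array, indexed by cpu and then page number;
 * anything beyond the static area is left to the generic code.
 */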
static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}
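
/*
 * On 32-bit, percpu data is reached through the %fs segment.  Install
 * a GDT entry (GDT_ENTRY_PERCPU) whose base is this CPU's percpu
 * offset so %fs-relative accesses land in the right area.  64-bit
 * loads the offset into the %gs base directly and needs no GDT entry.
 */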
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 *   Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
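	/*
	 * Each setup_pcpu_*() returns the per-cpu unit size on success
	 * or a negative errno, in which case the next, more
	 * conservative allocator is tried.
	 */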
	ret = setup_pcpu_remap(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
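	/*
	 * pcpu_base_addr is where the first chunk landed; delta turns a
	 * static percpu symbol address (relative to __per_cpu_start)
	 * into cpu 0's offset, and each further CPU is one unit size
	 * away.
	 */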
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
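		/*
		 * irq_stack_ptr is placed 64 bytes below the top of this
		 * CPU's per-cpu IRQ stack; 64-bit interrupt entry switches
		 * to it.
		 */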
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}