x86, percpu: fix minor bugs in setup_percpu.c
arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

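/*
 * On x86_64 percpu symbols are zero based, so BOOT_PERCPU_OFFSET makes
 * early percpu references resolve into the initial percpu area linked
 * into the kernel image at __per_cpu_load.  On 32-bit the static percpu
 * section is accessed at its linked address, hence an offset of 0.
 */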
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: allocation size in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
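        /* preferred lower bound: keep percpu areas above the ISA DMA zone */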
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Remap allocator
 *
 * This allocator uses a PMD page as the unit.  A PMD page is allocated
 * for each cpu and each is remapped into the vmalloc area using a PMD
 * mapping.  As a PMD page is quite large, only part of it is used for
 * the first chunk.  The unused part is returned to the bootmem
 * allocator.
 *
 * So, the PMD pages are mapped twice - once in the linear physical
 * mapping and once in the vmalloc area for the first percpu chunk.  The
 * double mapping adds pressure for one more PMD TLB entry, but that is
 * still much better than using only 4k mappings, and it stays NUMA
 * friendly.
 */
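/*
 * Rough layout of one remapped unit (a sketch, sizes illustrative):
 *
 *   ptr = pcpur_ptrs[cpu]                            ptr + PMD_SIZE
 *   |<-------------- pcpur_size -------------->|               |
 *   +----------------+--------------------------+--------------+
 *   | static percpu  | PERCPU_DYNAMIC_RESERVE   | freed back   |
 *   +----------------+--------------------------+--------------+
 */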
#ifdef CONFIG_NEED_MULTIPLE_NODES
static size_t pcpur_size __initdata;
static void **pcpur_ptrs __initdata;

static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
        size_t off = (size_t)pageno << PAGE_SHIFT;

        if (off >= pcpur_size)
                return NULL;

        return virt_to_page(pcpur_ptrs[cpu] + off);
}

static ssize_t __init setup_pcpu_remap(size_t static_size)
{
        static struct vm_struct vm;
        size_t ptrs_size;
        unsigned int cpu;
        ssize_t ret;

        /*
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, on non-NUMA, embedding is better, so remapping
         * is only worth doing when percpu allocation actually needs to
         * consider NUMA.
         */
        if (!cpu_has_pse || !pcpu_need_numa())
                return -EINVAL;

        /*
         * Currently supports only single page.  Supporting multiple
         * pages won't be too difficult if it ever becomes necessary.
         */
        pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
        if (pcpur_size > PMD_SIZE) {
                pr_warning("PERCPU: static data is larger than large page, "
                           "can't use large page\n");
                return -EINVAL;
        }

        /* allocate pointer array and alloc large pages */
        ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
        pcpur_ptrs = alloc_bootmem(ptrs_size);

        for_each_possible_cpu(cpu) {
                pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
                if (!pcpur_ptrs[cpu])
                        goto enomem;

                /*
                 * Only use pcpur_size bytes and give back the rest.
                 *
                 * Ingo: The 2MB up-rounding bootmem is needed to make
                 * sure the partial 2MB page is still fully RAM - it's
                 * not well-specified to have a PAT-incompatible area
                 * (unmapped RAM, device memory, etc.) in that hole.
                 */
                free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
                             PMD_SIZE - pcpur_size);

                memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
        }

        /* allocate address and map */
        vm.flags = VM_ALLOC;
        vm.size = num_possible_cpus() * PMD_SIZE;
        vm_area_register_early(&vm, PMD_SIZE);

        for_each_possible_cpu(cpu) {
                pmd_t *pmd;

                pmd = populate_extra_pmd((unsigned long)vm.addr
                                         + cpu * PMD_SIZE);
                set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
                                     PAGE_KERNEL_LARGE));
        }

        /* we're ready, commit */
        pr_info("PERCPU: Remapped at %p with large pages, static data "
                "%zu bytes\n", vm.addr, static_size);

        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
                                     pcpur_size - static_size, vm.addr, NULL);
        goto out_free_ar;

enomem:
        /*
         * Only pcpur_size bytes of each PMD page are still allocated
         * here; the tail was already given back to bootmem above.
         */
        for_each_possible_cpu(cpu)
                if (pcpur_ptrs[cpu])
                        free_bootmem(__pa(pcpur_ptrs[cpu]), pcpur_size);
        ret = -ENOMEM;
out_free_ar:
        free_bootmem(__pa(pcpur_ptrs), ptrs_size);
        return ret;
}
#else
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
        return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * PERCPU_DYNAMIC_RESERVE, allocated as one contiguous area with the
 * bootmem allocator and used as-is without being mapped into the
 * vmalloc area.  This lets the first chunk piggyback on the linear
 * physical PMD mapping and adds no extra TLB pressure.
 */
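/*
 * Units are laid out back to back in the one bootmem block, so cpu N's
 * unit starts at pcpue_ptr + N * pcpue_unit_size.
 */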
static void *pcpue_ptr __initdata;
static size_t pcpue_unit_size __initdata;

static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
                            + ((size_t)pageno << PAGE_SHIFT));
}

static ssize_t __init setup_pcpu_embed(size_t static_size)
{
        unsigned int cpu;

        /*
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, embedding allocation doesn't play well with
         * NUMA.
         */
        if (!cpu_has_pse || pcpu_need_numa())
                return -EINVAL;

        /* allocate and copy */
        pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
        pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
        pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
                                       PAGE_SIZE);
        if (!pcpue_ptr)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
                       static_size);

        /* we're ready, commit */
        pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
                pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);

        return pcpu_setup_first_chunk(pcpue_get_page, static_size,
                                      pcpue_unit_size,
                                      pcpue_unit_size - static_size, pcpue_ptr,
                                      NULL);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
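/*
 * pcpu4k_pages is indexed cpu-major: the page backing (cpu, pageno)
 * lives at index cpu * pcpu4k_nr_static_pages + pageno.
 */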
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
        if (pageno < pcpu4k_nr_static_pages)
                return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
        return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
        size_t pages_size;
        unsigned int cpu;
        int i, j;
        ssize_t ret;

        pcpu4k_nr_static_pages = PFN_UP(static_size);

        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
                               * sizeof(pcpu4k_pages[0]));
        pcpu4k_pages = alloc_bootmem(pages_size);

        /* allocate and copy */
        j = 0;
        for_each_possible_cpu(cpu)
                for (i = 0; i < pcpu4k_nr_static_pages; i++) {
                        void *ptr;

                        ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr)
                                goto enomem;

                        memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
                        pcpu4k_pages[j++] = virt_to_page(ptr);
                }

        /* we're ready, commit */
        pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
                pcpu4k_nr_static_pages, static_size);

        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
                                     pcpu4k_populate_pte);
        goto out_free_ar;

enomem:
        while (--j >= 0)
                free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
        ret = -ENOMEM;
out_free_ar:
        free_bootmem(__pa(pcpu4k_pages), pages_size);
        return ret;
}

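/*
 * On 32-bit, percpu variables are accessed through the %fs segment, so
 * each cpu gets a GDT data segment whose base is that cpu's percpu
 * offset.  64-bit uses MSR_GS_BASE instead and needs no GDT entry.
 */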
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate percpu area.  If PSE is supported, try to make use
         * of large page mappings.  Please read comments on top of
         * each allocator for details.
         */
        ret = setup_pcpu_remap(static_size);
        if (ret < 0)
                ret = setup_pcpu_embed(static_size);
        if (ret < 0)
                ret = setup_pcpu_4k(static_size);
        if (ret < 0)
                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
                      static_size, ret);

        pcpu_unit_size = ret;

        /* alrighty, percpu areas up and running */
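        /*
         * A percpu access is "address of the static symbol plus
         * per_cpu_offset(cpu)".  delta is the distance from the linked
         * static area to the base of the first chunk; adding
         * cpu * pcpu_unit_size then lands in that cpu's unit.
         */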
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}
457 }