/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler:	Changes for 2.1 kernel map.
 *		Michel Lespinasse:	Changes for 2.1 kernel map.
 *		Michael Chastain:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *		Andi Kleen	:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *					Probably mostly hotplug CPU ready now.
 *		Ashok Raj	:	CPU hotplug support
 */
#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>

#include <asm/pgalloc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/hw_irq.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(cpu_llc_id);

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;

EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

/* Set when the idlers are all forked */
int smp_threads_ready;

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
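
/*
 * Note: the values stored in cpu_state are the CPU_* hotplug lifecycle
 * states (e.g. CPU_UP_PREPARE, CPU_ONLINE, CPU_DEAD) that the boot and
 * hotplug paths below write and poll.
 */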

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
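
	/*
	 * SMP_TRAMPOLINE_BASE is a fixed page in low memory: the AP begins
	 * executing in 16-bit real mode, so the bootstrap code must sit
	 * below 1MB, at a 4KB-aligned address that can be encoded in the
	 * STARTUP IPI vector (see wakeup_secondary_via_INIT() below).
	 */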
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}

static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (This is probably redundant on most
	 * boards.)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;

	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return cpu_core_map[cpu];
	else
		return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
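
/*
 * Example (assuming a hypothetical two-socket box with two cores per
 * socket and two HT threads per core): once all CPUs are up,
 * cpu_sibling_map[0] holds CPU0 and its HT twin, cpu_core_map[0] holds
 * all four threads of socket 0, and booted_cores for each CPU in
 * socket 0 is 2.
 */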

static inline void set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}

/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	/*
	 * Check TSC sync first:
	 */
	check_tsc_sync_target();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * The sibling maps must be set before turning the online map on
	 * for this cpu.
	 */
	set_cpu_sibling_map(smp_processor_id());

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	unlock_ipi_call_lock();

	cpu_idle();
}

extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	unsigned int status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
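
		/*
		 * APIC_DM_REMRD is a "remote read": it asks the target
		 * APIC to expose one of its registers on the APIC bus.
		 * Once the ICR status bits leave the "in progress" state,
		 * a valid result can be fetched from the local APIC's
		 * remote read register (APIC_RRR), as done below.
		 */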
		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}

/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mb();
	atomic_set(&init_deasserted, 1);

	num_starts = 2;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
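
		/*
		 * Note on the vector encoding: bits 0-7 of a STARTUP IPI
		 * carry the physical page number of the real-mode entry
		 * point, so the AP starts executing at 0x000VV000 where
		 * VV is start_rip >> 12. This is why the trampoline page
		 * must be 4KB-aligned and below 1MB.
		 */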

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};

	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);

		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof (struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		/* Reuse the idle thread from a previous hotplug cycle */
		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During the cold boot process, the keventd thread is not spun up
	 * yet. When we do CPU hot-add, we create idle threads on the fly;
	 * they should not acquire any attributes from the calling context,
	 * so the clean way to create them is from keventd(). We check
	 * current_is_keventd() because the ACPI notifier also queues work
	 * to keventd(), and if the caller is already running in keventd
	 * context we would deadlock the keventd thread.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	start_rip = setup_trampoline();
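
	/*
	 * Hand-off state consumed by the freshly started AP: the trampoline
	 * switches it out of real mode and into the kernel's entry code,
	 * which loads init_rsp as its initial stack pointer and jumps to
	 * initial_code (start_secondary above).
	 */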
	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map),
		apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");
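
	/*
	 * The BIOS warm-reset path: shutdown status byte 0x0F in CMOS is
	 * set to 0x0A ("warm start with far jump"), and the reset vector
	 * in the BIOS data area at 0x467 (offset) / 0x469 (segment) is
	 * pointed at the trampoline, so a CPU coming out of INIT via the
	 * BIOS also lands on our bootstrap code.
	 */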
	CMOS_WRITE(0xa, 0xf);

	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}

#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;

/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
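
/*
 * For example, booting with "additional_cpus=2" on a 4-CPU box makes
 * prefill_possible_map() mark six CPUs possible, leaving room to
 * hot-add two more later.
 */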
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible,
		max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		nr_ioapics = 0;
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		nr_ioapics = 0;
		return -1;
	}

	return 0;
}
866 * Prepare for SMP bootup. The MP table or ACPI has been read
867 * earlier. Just do some sanity checking here and enable APIC mode.
869 void __init smp_prepare_cpus(unsigned int max_cpus)
871 nmi_watchdog_default();
872 current_cpu_data = boot_cpu_data;
873 current_thread_info()->cpu = 0; /* needed? */
874 set_cpu_sibling_map(0);
876 if (smp_sanity_check(max_cpus) < 0) {
877 printk(KERN_INFO "SMP disabled\n");
884 * Switch from PIC to APIC mode.
888 if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
889 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
890 GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
891 /* Or can we switch back to PIC here? */
895 * Now start the IO-APICs
897 if (!skip_ioapic_setup && nr_ioapics)
903 * Set up local APIC timer on boot CPU.
906 setup_boot_APIC_clock();

/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();

	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

	return 0;
}

/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();
	check_nmi_watchdog();
}

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * Last thread sibling in this cpu core going down?
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	spin_lock(&vector_lock);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&vector_lock);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */