/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler:	Changes for 2.1 kernel map.
 *		Michel Lespinasse:	Changes for 2.1 kernel map.
 *		Michael Chastain:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *		Andi Kleen	:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *
 *	Probably mostly hotplug CPU ready now.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/pgalloc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
/* Change for real CPU hotplug. Note other files need to be fixed
   first too. */
#define __cpuinit __init
#define __cpuinitdata __initdata
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
EXPORT_SYMBOL(cpu_core_id);
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}
/*
 * New funky TSC sync algorithm borrowed from IA64.
 * Its main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust, and it works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU).  We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step).  The
 * basic idea is for the slave to ask the master what itc value it has
 * and to read its own itc before and after the master responds.  Each
 * iteration gives us three timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *	       ---\
 *		   --->
 *			tm
 *		   /---
 *	       /---
 *	t1 <---
 *
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1.  If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric.  Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles.  However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles).  In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also account for hundreds of cycles. So it's
 * probably worse. It claims 153 cycles error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */
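/*
 * A worked example of the midpoint rule above (the numbers are
 * illustrative, not measurements): suppose the slave reads t0 = 1000,
 * the master replies with tm = 1600, and the slave then reads
 * t1 = 1100.  The midpoint of the slave's two reads is
 * (1000 + 1100)/2 = 1050, so the estimated offset is
 * 1050 - 1600 = -550 cycles: the slave is 550 cycles behind and should
 * add 550 to its TSC.  The uncertainty of the estimate is bounded by
 * the roundtrip t1 - t0 = 100 cycles.  get_delta() below performs
 * exactly this computation, keeping the triple with the smallest
 * roundtrip out of NUM_ITERS attempts.
 */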
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)

/* Intentionally don't use cpu_relax() during TSC synchronization
   because we don't want to go into funky power save modes or cause
   hypervisors to schedule us away.  Going to sleep would likely affect
   latency, and low latency is the primary objective here. -AK */
#define no_cpu_relax()	barrier()
static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	/* smp_call_function() broadcasts; only the BP participates. */
	if (smp_processor_id() != boot_cpu_id)
		return;

	go[MASTER] = 0;

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
		/* Wait for the slave's signal, then publish our TSC. */
		while (!go[MASTER])
			no_cpu_relax();
		go[MASTER] = 0;
		rdtscll(go[SLAVE]);
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU.  A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		rdtscll(t0);
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			no_cpu_relax();
		go[SLAVE] = 0;
		rdtscll(t1);

		/* Keep the triple with the smallest roundtrip t1 - t0. */
		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
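/*
 * Note on the averaging above: computing (best_t0 + best_t1)/2 directly
 * could overflow, so get_delta() sums the halves instead and adds the
 * 1 back when both operands are odd.  This yields exactly
 * floor((best_t0 + best_t1)/2): if both are odd, the sum is even and
 * each truncated half is short by 1/2; if at most one is odd, the
 * truncation loss is absorbed by the floor.
 */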
static __cpuinit void sync_tsc(void)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TSC_SYNC
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;
#endif

	go[MASTER] = 1;

	smp_call_function(sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);
	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0) {
			done = 1;	/* let's lock on to this... */
			bound = rt;
		}

		if (!done) {
			unsigned long long tsc;

			/* Accumulate the residual error to estimate the
			   latency of the adjustment itself, damped by 4. */
			adjust_latency += -delta;
			adj = -delta + adjust_latency/4;

			rdtscll(tsc);
			wrmsrl(MSR_IA32_TSC, tsc + adj);
		}
#if DEBUG_TSC_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}
	spin_unlock_irqrestore(&tsc_sync_lock, flags);

#if DEBUG_TSC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), boot_cpu_id, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	if (notscsync || !cpu_has_tsc)
		return;
	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
			boot_cpu_id);
	sync_tsc();
}
static __init int notscsync_setup(char *s)
{
	notscsync = 1;
	return 0;
}
__setup("notscsync", notscsync_setup);
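/* Passing "notscsync" on the kernel command line skips the TSC
   synchronization above, e.g. on machines where the firmware is already
   known to keep the TSCs in sync. */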
static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic.  Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two.  This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish.  We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	calibrate_delay();	/* get our bogomips */
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting
	 * is so fragile that we want to limit the things done
	 * here to the most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	mb();

	/* Wait for the TSC sync so that nothing gets scheduled with an
	   unsynchronized TSC.  We still process interrupts, which could
	   see an inconsistent time in that window, unfortunately. */
	tsc_sync_wait();

	cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		apic_wait_icr_idle();	/* wait for idle */

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();
	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the secondary */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_rip >> 12));
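		/*
		 * The SIPI vector field is the 4KB page number of the
		 * trampoline: the AP starts in real mode at vector << 12
		 * (i.e. CS = vector * 0x100, IP = 0), which is why
		 * setup_trampoline() placed the code page-aligned and
		 * below 1MB.
		 */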
		/* Give the other CPU some time to accept the IPI. */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/* Give the other CPU some time to accept the IPI. */
		udelay(200);

		/* Clear the ESR first, due to the Pentium erratum 3AP. */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
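/*
 * The sequence above is the canonical Intel MP Specification startup
 * algorithm: assert a level-triggered INIT, deassert it, then (for
 * integrated APICs only) send up to two STARTUP IPIs, checking the
 * APIC error status register between steps.
 */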
/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;

	/*
	 * We can't use kernel_thread since we must avoid rescheduling
	 * the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(idle);
	}
	x86_cpu_to_apicid[cpu] = apicid;

	cpu_pda[cpu].pcurrent = idle;

	start_rip = setup_trampoline();

	init_rsp = idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
	       start_rip, init_rsp);
	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
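	/*
	 * CMOS register 0xF is the BIOS shutdown status byte; 0xA requests
	 * a warm start without EOI, i.e. after a CPU reset the BIOS jumps
	 * through the far pointer at 0x40:0x67 (offset at 0x467, segment
	 * at 0x469), which now points at the trampoline.  This path is
	 * how CPUs without an integrated APIC (e.g. 82489DX setups) get
	 * started, since they don't take the STARTUP vector directly.
	 */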
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean; start the actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * Allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);
		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
/*
 * Construct cpu_sibling_map[], so that we can tell the sibling CPU
 * on SMT systems efficiently.
 */
static __cpuinit void detect_siblings(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	for_each_online_cpu (cpu) {
		struct cpuinfo_x86 *c = cpu_data + cpu;
		int siblings = 0;
		int i;

		if (smp_num_siblings > 1) {
			for_each_online_cpu (i) {
				if (cpu_core_id[cpu] == cpu_core_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING
			       "WARNING: %d siblings found for CPU%d, should be %d\n",
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}
		if (c->x86_num_cores > 1) {
			for_each_online_cpu (i) {
				if (phys_proc_id[cpu] == phys_proc_id[i])
					cpu_set(i, cpu_core_map[cpu]);
			}
		} else
			cpu_core_map[cpu] = cpu_sibling_map[cpu];
	}
}
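/*
 * For illustration (hypothetical single-socket box with 2 cores and 2
 * HT siblings per core, CPUs 0-3, core ids 0,0,1,1): the loops above
 * yield cpu_sibling_map[0] = { 0, 1 } (same cpu_core_id) and
 * cpu_core_map[0] = { 0, 1, 2, 3 } (same phys_proc_id).
 */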
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/* Reset trampoline flag */
	*((volatile int *) phys_to_virt(0x467)) = 0;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Free pages reserved for SMP bootup.
	 * When you add hotplug CPU support later, remove this.
	 * Note there is more work to be done for later CPU bootup.
	 */
	free_page((unsigned long) __va(PAGE_SIZE));
	free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
#endif
}
/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __cpuinit void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}
/*
 * Handle user cpus=... parameter.
 */
static __cpuinit void enforce_max_cpus(unsigned max_cpus)
{
	int i, k = 0;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		if (++k > max_cpus) {
			cpu_clear(i, cpu_possible_map);
			cpu_clear(i, cpu_present_map);
		}
	}
}
/*
 * Various sanity checks.
 */
static int __cpuinit smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
		       boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		return -1;
	}

	return 0;
}
/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __cpuinit smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */

	enforce_max_cpus(max_cpus);

	/* Fill in cpu_present_mask */
	for (i = 0; i < NR_CPUS; i++) {
		int apicid = cpu_present_to_apicid(i);
		if (physid_isset(apicid, phys_cpu_present_map)) {
			cpu_set(i, cpu_present_map);
			/* possible map would be different if we supported real
			   CPU hotplug. */
			cpu_set(i, cpu_possible_map);
		}
	}

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}
	x86_cpu_to_apicid[0] = boot_cpu_id;

	/* Now start the IO-APICs */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/* Set up local APIC timer on boot CPU. */
	setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
}
/*
 * Entry point to boot a CPU.
 *
 * This is all __cpuinit, not __devinit for now because we don't support
 * CPU hotplug (yet).
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);
	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	return 0;
}
/*
 * Finish the SMP boot.
 */
void __cpuinit smp_cpus_done(unsigned int max_cpus)
{
	zap_low_mappings();
	smp_cleanup_boot();

#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif

	detect_siblings();
}