/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler:	Changes for 2.1 kernel map.
 *		Michel Lespinasse:	Changes for 2.1 kernel map.
 *		Michael Chastain:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *		Andi Kleen	:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *					Various cleanups.
 *					Probably mostly hotplug CPU ready now.
 *		Ashok Raj	:	CPU hotplug support
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/nmi.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
EXPORT_SYMBOL(cpu_core_id);

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);
/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idle threads are all forked */
int smp_threads_ready;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
/*
 * Store all idle threads so they can be reused instead of creating
 * a new thread each time a CPU comes up. This also avoids complicated
 * thread-destroy functionality for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
/*
 * cpu_possible_map should be static; it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * If CPU hotplug is supported, we need to preallocate for all
 * NR_CPUS CPUs, hence cpu_possible_map represents the entire NR_CPUS range.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define fixup_cpu_possible_map(x)	cpu_set((x), cpu_possible_map)
#else
#define fixup_cpu_possible_map(x)
#endif
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}
/*
 * New Funky TSC sync algorithm borrowed from IA64.
 * The main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust and works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step). The
 * basic idea is for the slave to ask the master what itc value it has
 * and to read its own itc before and after the master responds. Each
 * iteration gives us three timestamps:
 *
 *	t0: the slave's TSC right before it sends the request,
 *	tm: the master's TSC value returned in the response,
 *	t1: the slave's TSC right after it receives the response.
 *
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1. If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric. Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * round-trip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles. However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also amount to hundreds of cycles. So it's
 * probably worse. It claims 153 cycles error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */
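/*
 * Editorial worked example (illustrative cycle counts, not from the
 * original source): suppose the slave reads t0 = 1000, the master
 * replies with tm = 1600, and the slave reads t1 = 2000. The midpoint
 * (t0 + t1)/2 = 1500, so the slave appears 1500 - 1600 = -100 cycles
 * behind the master and should add +100 to its TSC. The round trip
 * t1 - t0 = 1000 cycles bounds the remaining error.
 */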
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)
/* Intentionally don't use cpu_relax() during TSC synchronization
   because we don't want to go into funky power-save modes or cause
   hypervisors to schedule us away. Going to sleep would likely affect
   latency, and low latency is the primary objective here. -AK */
#define no_cpu_relax()	barrier()
static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
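/*
 * Editorial note: each of the NUM_ROUNDS adjustment rounds performs
 * NUM_ITERS measurement iterations, so the master in sync_master()
 * below services NUM_ROUNDS*NUM_ITERS = 64*5 = 320 handshakes per slave.
 */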
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	if (smp_processor_id() != 0)
		return;

	go[MASTER] = 0;

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
		while (!go[MASTER])
			no_cpu_relax();
		go[MASTER] = 0;
		rdtscll(go[SLAVE]);
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU. A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		rdtscll(t0);
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			no_cpu_relax();
		go[SLAVE] = 0;
		rdtscll(t1);

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
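/*
 * Editorial note on the halving trick above: it computes (a + b)/2
 * without risking unsigned overflow. E.g. a = 3, b = 5: 3/2 + 5/2 =
 * 1 + 2 = 3, and since both operands are odd the two discarded halves
 * add up to 1, giving 4 == (3 + 5)/2.
 */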
static __cpuinit void sync_tsc(void)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#ifdef DEBUG_TSC_SYNC
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;
#endif

	go[MASTER] = 1;

	smp_call_function(sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);
	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0) {
			done = 1;	/* let's lock on to this... */
			bound = rt;
		}

		if (!done) {
			unsigned long t;
			if (i > 0) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			rdtscll(t);
			wrmsrl(MSR_IA32_TSC, t + adj);
		}
#ifdef DEBUG_TSC_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}
	spin_unlock_irqrestore(&tsc_sync_lock, flags);

#ifdef DEBUG_TSC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), boot_cpu_id, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	if (notscsync || !cpu_has_tsc)
		return;
	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
			boot_cpu_id);
	sync_tsc();
}
static __init int notscsync_setup(char *s)
{
	notscsync = 1;
	return 0;
}
__setup("notscsync", notscsync_setup);
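/*
 * Editorial usage note: booting with "notscsync" on the kernel command
 * line, e.g. "linux ... notscsync", sets the flag above and skips the
 * TSC synchronization in tsc_sync_wait().
 */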
static atomic_t init_deasserted __cpuinitdata;
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;
	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();
	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates by a factor of two
	 * the time the boot CPU spends sending the up to 2 STARTUP IPIs.
	 * This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}
	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	if (smp_num_siblings > 1) {
		for_each_cpu(i) {
			if (cpu_core_id[cpu] == cpu_core_id[i]) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	if (current_cpu_data.x86_num_cores > 1) {
		for_each_cpu(i) {
			if (phys_proc_id[cpu] == phys_proc_id[i]) {
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
			}
		}
	} else {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
	}
}
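/*
 * Editorial example (hypothetical topology, not from the original
 * source): on a two-socket box with two cores per package and
 * HyperThreading enabled (smp_num_siblings == 2), after CPUs 0-7 boot,
 * cpu_sibling_map[0] holds CPU 0 plus its HT twin (same cpu_core_id),
 * while cpu_core_map[0] holds all four logical CPUs whose phys_proc_id
 * matches package 0.
 */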
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP
	 * booting is so fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * The sibling maps must be set before turning the online map on for
	 * this cpu
	 */
	set_cpu_sibling_map(smp_processor_id());

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	unlock_ipi_call_lock();

	mb();

	/* Wait for the TSC sync here so nothing gets scheduled before it
	   is done. We still process interrupts, which could see an
	   inconsistent time in that window unfortunately. */
	tsc_sync_wait();

	cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
/*
 * Kick the secondary CPU to wake it up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_rip >> 12));
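		/*
		 * Editorial note: the STARTUP IPI's 8-bit vector field holds
		 * the 4 KB page number of the real-mode entry point, which is
		 * why start_rip is shifted right by 12. For example, were the
		 * trampoline copied to physical 0x6000, the vector would be
		 * 0x06 and the AP would begin at 0600:0000 = physical 0x6000.
		 */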
		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
struct create_idle {
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void do_fork_idle(void *_c_idle)
{
	struct create_idle *c_idle = _c_idle;

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};
	DECLARE_WORK(work, do_fork_idle, &c_idle);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + (unsigned long) c_idle.idle->thread_info)) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During the cold boot process, the keventd thread is not spun up yet.
	 * When we do CPU hot-add, we create idle threads on the fly; we should
	 * not acquire any attributes from the calling context. Hence the clean
	 * way to create kernel_threads() is to do that from keventd().
	 * We do the current_is_keventd() check because the ACPI notifier
	 * was also queuing to keventd(), and when the caller is already
	 * running in the context of keventd(), we would end up locking up the
	 * keventd thread.
	 */
	if (!keventd_up() || current_is_keventd())
		work.func(work.data);
	else {
		schedule_work(&work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda[cpu].pcurrent = c_idle.idle;

	start_rip = setup_trampoline();

	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
	       start_rip, init_rsp);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
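	/*
	 * Editorial note: 0x467 is the real-mode warm-reset vector, stored as
	 * offset (0x467) and segment (0x469). With start_rip = 0x6000, for
	 * instance, segment 0x0600 and offset 0x0000 are written, so a CPU
	 * taking the BIOS warm-reset path jumps to 0600:0000 = physical
	 * 0x6000, the trampoline set up above.
	 */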
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#ifdef APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Free pages reserved for SMP bootup.
	 * When you add hotplug CPU support later, remove this.
	 * Note there is more work to be done for later CPU bootup.
	 */

	free_page((unsigned long) __va(PAGE_SIZE));
	free_page((unsigned long) __va(SMP_TRAMPOLINE_BASE));
#endif
}
/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}
/*
 * Handle the user maxcpus=... boot parameter.
 */
static __init void enforce_max_cpus(unsigned max_cpus)
{
	int i, k;
	k = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		if (++k > max_cpus) {
			cpu_clear(i, cpu_possible_map);
			cpu_clear(i, cpu_present_map);
		}
	}
}
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
		       boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		nr_ioapics = 0;
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		nr_ioapics = 0;
		return -1;
	}

	return 0;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */

	enforce_max_cpus(max_cpus);

	/*
	 * Fill in cpu_present_map
	 */
	for (i = 0; i < NR_CPUS; i++) {
		int apicid = cpu_present_to_apicid(i);
		if (physid_isset(apicid, phys_cpu_present_map)) {
			cpu_set(i, cpu_present_map);
			cpu_set(i, cpu_possible_map);
		}
		fixup_cpu_possible_map(i);
	}

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

	return 0;
}
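/*
 * Editorial usage note: __cpu_up() is not called directly; the generic
 * cpu_up() path invokes it, e.g. when userspace writes 1 to
 * /sys/devices/system/cpu/cpuN/online on a CPU-hotplug kernel.
 */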
/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifndef CONFIG_HOTPLUG_CPU
	zap_low_mappings();
#endif
	smp_cleanup_boot();

#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif

	check_nmi_watchdog();
}
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	for_each_cpu_mask(sibling, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	phys_proc_id[cpu] = BAD_APICID;
	cpu_core_id[cpu] = BAD_APICID;
}
void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	disable_APIC_timer();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			return;
		}
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */