/*
 *	linux/arch/alpha/kernel/smp.c
 *
 * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 * This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#define DBGS(args)	printk args
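/*
 * Note: in the full source this printk-backed definition is the
 * debugging variant (selected by a DEBUG_SMP-style conditional in
 * some versions); with debugging off, DBGS() expands to nothing and
 * the trace calls below compile away entirely.
 */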
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);
/* A collection of single-bit IPI messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
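/*
 * The ____cacheline_aligned on both the member and the array keeps
 * each CPU's IPI word on its own cache line: senders set_bit() into
 * the target's word while the target xchg()s it clear in handle_ipi(),
 * so separate lines avoid false sharing between CPUs.
 */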
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;
/* Which CPU ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);
int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);
extern void calibrate_delay(void);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
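/*
 * These two fields drive smp_percpu_timer_interrupt() below: every
 * tick decrements prof_counter, and when it reaches zero the CPU runs
 * update_process_times() and reloads the counter from prof_multiplier.
 * Both start at 1, i.e. accounting happens on every tick.
 */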
static int
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return 0;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2) + 1;
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);
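	/* rxrdy/txrdy form the handshake with the SRM console: setting
	   this CPU's bit in hwrpb->rxrdy announces that a message is
	   waiting in its ipc_buffer, while the per-CPU txrdy bits carry
	   replies in the other direction, which is what wait_for_txrdy()
	   polls on.  Hence the 1UL << cpuid bit-per-CPU mask.  */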
	if (wait_for_txrdy(cpumask))
		goto timeout;

	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
	return;
}
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;
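	/* The KSP arithmetic above relies on ipcb living at the base of
	   the idle task's thread_union: the kernel stack grows down from
	   the top of that union, so ipcb + sizeof(union thread_union) - 16
	   is the initial stack pointer, with 16 bytes of slop left at the
	   very top.  */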
318 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
319 hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
321 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
322 cpuid, idle->state, ipcb->flags));
324 /* Setup HWRPB fields that SRM uses to activate secondary CPU */
325 hwrpb->CPU_restart = __smp_callin;
326 hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
328 /* Recalculate and update the HWRPB checksum */
329 hwrpb_update_checksum(hwrpb);
332 * Send a "start" command to the specified processor.
335 /* SRM III 3.4.1.3 */
336 cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */
337 cpu->flags &= ~1; /* turn off Bootstrap In Progress */
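	/* Decoding the flag bits per the comments above: bit 0 is
	   Bootstrap In Progress, and 0x22 sets the Restart Capable and
	   Context Valid bits, telling the console that the HWPCB we just
	   built is usable and the CPU may be restarted via CPU_restart.  */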
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}

	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */
	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		cpu_set(boot_cpuid, cpu_present_map);
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}
void
smp_prepare_boot_cpu(void)
{
}
int
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
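/*
 * The arithmetic above: BogoMIPS = loops_per_jiffy * HZ / 500000, so
 * dividing the summed loops_per_jiffy by (500000/HZ) yields the integer
 * part and the (5000/HZ) divisor scales out the two decimal places;
 * the +2500 fudge rounds the printed value.
 */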
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}
int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;
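/*
 * The two counters implement a rendezvous: the caller initializes both
 * to the number of target CPUs, each target decrements unstarted_count
 * just before invoking func (after which the caller's stack frame may
 * be reused unless wait is set), and decrements unfinished_count after
 * func returns so a waiting caller knows everyone has finished.
 */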
/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially zero.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
		    {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
			break;
		    }

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);
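	/* From here on, unstarted_count tracks targets that have not yet
	   picked up the call and unfinished_count tracks targets still
	   running func.  Both live in this stack frame, which is why the
	   IPI handler must decrement unstarted_count before we may
	   return.  */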
	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
EXPORT_SYMBOL(smp_call_function_on_cpu);
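/*
 * Illustrative (hypothetical) usage -- run a counter bump on every
 * other online CPU and wait for completion:
 *
 *	static void bump(void *info) { atomic_inc(info); }
 *	...
 *	smp_call_function_on_cpu(bump, &counter, 1, 1, cpu_online_map);
 *
 * func runs in interrupt context on the targets, so it must be fast
 * and must not block.
 */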
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}
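/*
 * The flush_* routines below share a pattern: flush locally, and if
 * the mm has at most one user just zero the other CPUs' context[]
 * slots -- forcing them to allocate a fresh ASN if they ever activate
 * this mm -- instead of paying for a cross-CPU IPI.  Only a shared mm
 * needs the smp_call_function() broadcast.
 */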
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
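/*
 * A range flush degenerates to a whole-mm flush because Alpha's TLB
 * is managed per address space number (ASN): invalidating the mm just
 * retires its ASN, which is cheaper than walking the range page by
 * page and is the only per-mm invalidate the hardware really offers.
 */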
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}