/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
/*
 * bitmask of present and online CPUs.
 * The present bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
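/*
 * Illustrative sketch, not part of the original file: each IPI type
 * occupies a single bit in ipi_data.bits, so several pending messages
 * accumulate in one word and are peeled off one at a time in do_IPI()
 * below.  The disabled snippet shows the encoding when two messages are
 * posted back-to-back before the target CPU takes the interrupt.
 */
#if 0
static void ipi_bits_example(struct ipi_data *ipi)
{
	spin_lock(&ipi->lock);
	ipi->bits |= 1 << IPI_TIMER;		/* bits == 0x1 */
	ipi->bits |= 1 << IPI_CALL_FUNC;	/* bits == 0x5 */
	spin_unlock(&ipi->lock);
	/* do_IPI() will now see both messages in a single pass */
}
#endif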
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd, PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
	pgd_free(pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
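/*
 * Illustrative sketch, not part of the original file: __cpu_up() is
 * reached through the generic hotplug path, so platform code normally
 * brings secondaries online via cpu_up() rather than calling it
 * directly.  The disabled loop below is a hypothetical bring-up
 * sequence shown only for orientation.
 */
#if 0
static void __init example_bring_up_secondaries(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		if (!cpu_online(cpu))
			cpu_up(cpu);	/* ends up in __cpu_up() above */
	}
}
#endif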
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * Set up the local timer for this CPU.
	 */
	local_timer_setup(cpu);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
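/*
 * Worked example, not part of the original file: loops_per_jiffy is
 * calibrated per CPU, and BogoMIPS = loops_per_jiffy * HZ / 500000.
 * With HZ = 100 and two CPUs each at loops_per_jiffy = 1245184,
 * bogosum = 2490368, so bogosum / (500000/HZ) = 498 and
 * (bogosum / (5000/HZ)) % 100 = 7, and smp_cpus_done() prints
 * "SMP: Total of 2 processors activated (498.07 BogoMIPS)."
 */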
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}
static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}
/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
				    int retry, int wait, cpumask_t callmap)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int ret = 0;

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		goto out;

	data.pending = callmap;
	if (wait)
		data.unfinished = callmap;

	/*
	 * try to get the mutex on smp_call_function_data
	 */
	spin_lock(&smp_call_function_lock);
	smp_call_function_data = &data;

	send_ipi_message(callmap, IPI_CALL_FUNC);

	timeout = jiffies + HZ;
	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
		barrier();

	/*
	 * did we time out?
	 */
	if (!cpus_empty(data.pending)) {
		/*
		 * this may be causing our panic - report it
		 */
		printk(KERN_CRIT
		       "CPU%u: smp_call_function timeout for %p(%p)\n"
		       "      callmap %lx pending %lx, %swait\n",
		       smp_processor_id(), func, info, *cpus_addr(callmap),
		       *cpus_addr(data.pending), wait ? "" : "no ");

		/*
		 * TRACE
		 */
		timeout = jiffies + (5 * HZ);
		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
			barrier();

		if (cpus_empty(data.pending))
			printk(KERN_CRIT "     RESOLVED\n");
		else
			printk(KERN_CRIT "     STILL STUCK\n");
	}

	/*
	 * whatever happened, we're done with the data, so release it
	 */
	smp_call_function_data = NULL;
	spin_unlock(&smp_call_function_lock);

	if (!cpus_empty(data.pending)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (wait)
		while (!cpus_empty(data.unfinished))
			barrier();
 out:
	return ret;
}

int smp_call_function(void (*func)(void *info), void *info, int retry,
		      int wait)
{
	return smp_call_function_on_cpu(func, info, retry, wait,
					cpu_online_map);
}
EXPORT_SYMBOL_GPL(smp_call_function);
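/*
 * Illustrative sketch, not part of the original file: a typical caller
 * passes a simple handler and waits for every other online CPU to run
 * it.  "example_report" is a hypothetical callback used only for
 * demonstration.
 */
#if 0
static void example_report(void *info)
{
	printk(KERN_INFO "CPU%u: handled cross call\n", smp_processor_id());
}

static void example_cross_call(void)
{
	/* retry = 1, wait = 1: block until all other CPUs have finished */
	smp_call_function(example_report, NULL, 1, 1);
}
#endif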
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}
void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}
static void ipi_timer(void)
{
	irq_enter();
	profile_tick(CPU_PROFILING);
	update_process_times(user_mode(get_irq_regs()));
	irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif
/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data
 */
static void ipi_call_function(unsigned int cpu)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	cpu_clear(cpu, data->pending);

	func(info);

	if (wait)
		cpu_clear(cpu, data->unfinished);
}
static DEFINE_SPINLOCK(stop_lock);
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				ipi_call_function(cpu);
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}
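/*
 * Illustrative fragment, not part of the original file: the extraction
 * loop in do_IPI() isolates the lowest set bit with msgs & -msgs (two's
 * complement), clears it from msgs, then converts the isolated bit to
 * its index with ffz(~nextmsg).  For msgs = 0x5 (IPI_TIMER and
 * IPI_CALL_FUNC pending):
 */
#if 0
	msgs = 0x5;
	nextmsg = msgs & -msgs;		/* 0x1: lowest set bit isolated */
	msgs &= ~nextmsg;		/* 0x4 remains pending */
	nextmsg = ffz(~nextmsg);	/* ffz(~0x1) == 0 == IPI_TIMER */
	/* next pass: 0x4 & -0x4 == 0x4, ffz(~0x4) == 2 == IPI_CALL_FUNC */
#endif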
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void smp_send_timer(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_TIMER);
}
void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_CPU_STOP);
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static int
on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
		 cpumask_t mask)
{
	int ret = 0;

	preempt_disable();

	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
	if (cpu_isset(smp_processor_id(), mask))
		func(info);

	preempt_enable();

	return ret;
}
/**********************************************************************/
/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t mask = mm->cpu_vm_mask;

	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
}
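/*
 * Illustrative sketch, not part of the original file: struct tlb_args
 * exists because smp_call_function_on_cpu() passes only a single
 * void *info to the remote handler, so multi-argument TLB operations
 * are marshalled through one stack-allocated struct, as in
 * flush_tlb_range() above.  A hypothetical new cross-CPU operation
 * would follow the same pattern.
 */
#if 0
static inline void ipi_example_op(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	/* unpack the marshalled arguments on the remote CPU */
	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void example_op(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_example_op, &ta, 1, 1);
}
#endif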