/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

static void smp_tune_scheduling (void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;	/* bytes */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */
	if (!cpu_khz)
		return;

	cachesize = cd->linesz * cd->sets * cd->ways;
}

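/*
 * Worked example for smp_tune_scheduling() above, with made-up numbers:
 * loops_per_jiffy == 1171875 and HZ == 100 give
 * cpu_khz = 1171875 * 2 * 100 / 1000 = 234375, i.e. an estimated
 * 234 MHz core clock.  Likewise a hypothetical 32-byte line, 512-set,
 * 4-way scache yields cachesize = 32 * 512 * 4 = 65536 bytes.
 */
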
extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	/* Tell the boot CPU this secondary is alive and ready. */
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

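/*
 * For reference, the fields of struct call_data_struct that this file
 * relies on (the actual definition lives in a header, not here): func
 * and info describe the call to make, started and finished are atomic
 * acknowledgement counters, and wait selects synchronous completion.
 */
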
/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock.                           Deadlock.
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
				int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&smp_call_lock);

	return 0;
}

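/*
 * Usage sketch, illustrative only; do_drain() and the drained counter
 * are hypothetical, not part of this file.  Per the rules above, call
 * with interrupts enabled and never from interrupt or bottom half
 * context:
 *
 *	static atomic_t drained;
 *
 *	static void do_drain(void *unused)
 *	{
 *		atomic_inc(&drained);	(stand-in for real per-cpu work)
 *	}
 *
 *	smp_call_function(do_drain, NULL, 1, 1);
 *	do_drain(NULL);			(the calling CPU is not included)
 */
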
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	prom_prepare_cpus(max_cpus);
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The processor goes to start_secondary(), which sets the callin
	 * flag.  The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

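/*
 * Fast-path illustration (hypothetical scenario): a single-threaded
 * process unmapping part of its own mm takes the else branch above.  No
 * IPI is sent; zeroing cpu_context(i, mm) on every other cpu instead
 * forces switch_mm() there to allocate a fresh context before the mm
 * can run again, which is as good as a flush.
 */
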
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

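/*
 * Example, assuming the usual callers: tearing down a vmalloc mapping
 * ends up here as flush_tlb_kernel_range(addr, addr + size), which
 * broadcasts a single IPI so every cpu flushes the range locally.
 */
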
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
	local_flush_tlb_one(vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);