/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
/*
 *	Smarter SMP flushing macros.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right per-cpu variable for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future, when interrupts are split into per-CPU domains, this could
 *	be fixed, at the cost of triggering multiple IPIs in some cases.
 */
union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	-1ULL
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
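/*
 * As a sketch of the sizing intent only (this check is not in the original
 * code, it merely illustrates why the pad[] member exists): every flush_state
 * entry occupies at least one full cache line, so CPUs spinning on different
 * entries never share a line.
 *
 *	BUILD_BUG_ON(sizeof(union smp_flush_state) < SMP_CACHE_BYTES);
 */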
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */
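/*
 * As a rough sketch only (paraphrasing switch_mm() for this architecture,
 * not a verbatim quote of it), the 1a steps above correspond to:
 *
 *	cpu_clear(cpu, prev->cpu_vm_mask);		-- 1a1
 *	write_pda(mmu_state, TLBSTATE_OK);		-- 1a2
 *	write_pda(active_mm, next);			-- 1a3
 *	cpu_set(cpu, next->cpu_vm_mask);		-- 1a4
 *	load_cr3(next->pgd);				-- 1a5, change cr3
 */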
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu, sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the Intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return.
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * the CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}

core_initcall(init_smp_flush);
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
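/*
 * For illustration only: a rough equivalent of the on_each_cpu() call above
 * (the real helper also copes with !SMP builds and bookkeeping). The trailing
 * '1, 1' arguments mean "retry" and "wait for completion":
 *
 *	preempt_disable();
 *	smp_call_function(do_flush_tlb_all, NULL, 1, 1);	-- all other CPUs
 *	local_irq_disable();
 *	do_flush_tlb_all(NULL);					-- this CPU
 *	local_irq_enable();
 *	preempt_enable();
 */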
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system defined in the mask.
 */
static int __smp_call_function_mask(cpumask_t mask,
				    void (*func)(void *), void *info,
				    int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);
	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);
	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();
	if (!wait)
		return 0;
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
	return 0;
}
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	int ret;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&call_lock);
	ret = __smp_call_function_mask(mask, func, info, wait);
	spin_unlock(&call_lock);
	return ret;
}
EXPORT_SYMBOL(smp_call_function_mask);
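/*
 * Usage sketch (hypothetical caller, not part of this file; the callback
 * name is invented): run a fast, non-blocking callback on every online CPU
 * except the local one and wait for all of them to finish.
 *
 *	static void drain_remote_state(void *info)
 *	{
 *		...	-- runs with interrupts disabled on each target CPU
 *	}
 *	...
 *	cpumask_t mask;
 *	int me = get_cpu();			-- pin ourselves to one CPU
 *
 *	mask = cpu_online_map;
 *	cpu_clear(me, mask);			-- mask must not include us
 *	smp_call_function_mask(mask, drain_remote_state, NULL, 1);
 *	put_cpu();
 */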
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * is executing it, or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
			     int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int ret, me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		/* Just run it locally, with interrupts disabled. */
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

	put_cpu();
	return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
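/*
 * Usage sketch (hypothetical, not part of this file; the callback and the
 * request structure are invented names): run a callback on CPU 2 and wait,
 * so the result is valid once the call returns.
 *
 *	struct freq_request { unsigned long khz; };
 *
 *	static void read_cpu_khz(void *info)
 *	{
 *		((struct freq_request *)info)->khz = cpu_khz;
 *	}
 *	...
 *	struct freq_request req;
 *
 *	smp_call_function_single(2, read_cpu_khz, &req, 0, 1);
 *	-- req.khz is valid here because wait == 1
 */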
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *	  CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
		      int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
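/*
 * Usage sketch (hypothetical, not part of this file; 'bump_counter' is an
 * invented name): have every other CPU run a fast callback and wait for all
 * of them. The callback runs in interrupt context, so it must not sleep.
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void bump_counter(void *unused)
 *	{
 *		atomic_inc(&hits);
 *	}
 *	...
 *	smp_call_function(bump_counter, NULL, 1, 1);	-- nonatomic is unused
 */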
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/* Remove this CPU from the online map, then halt it. */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	for (;;)
		halt();
}
void smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
	add_pda(irq_resched_count, 1);
}
asmlinkage void smp_call_function_interrupt(void)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the call_data structure may be out of scope
	 * unless wait == 1.
	 */
	irq_enter();
	(*func)(info);
	add_pda(irq_call_count, 1);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
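/*
 * Rough timeline of the handshake above, for illustration only:
 *
 *	initiator (__smp_call_function_mask)	target (this handler)
 *	------------------------------------	---------------------
 *	data on stack, call_data = &data
 *	send IPI
 *	spin until started == cpus		copy func/info/wait
 *						mb(); started++
 *	if !wait: return; 'data' may die	run func(info)
 *	else: spin until finished == cpus	if wait: mb(); finished++
 */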