x86: don't call MP_processor_info for disabled cpu (64bit)
linux-2.6: include/asm-x86/mmu_context_64.h
#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>
#endif

/*
 * possibly do the LDT unload here?
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

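/*
 * Mark this CPU lazy with respect to TLB flushes for the current mm:
 * while a kernel thread runs, a later flush IPI can simply drop the mm
 * via leave_mm() instead of flushing over and over.
 */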
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (read_pda(mmu_state) == TLBSTATE_OK)
                write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

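/*
 * Switch address spaces: update the cpu_vm_mask bookkeeping, point CR3 at
 * the next mm's page tables and reload the LDT if it differs from the
 * previous one.
 */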
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();
        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
                write_pda(mmu_state, TLBSTATE_OK);
                write_pda(active_mm, next);
#endif
                cpu_set(cpu, next->cpu_vm_mask);
                load_cr3(next->pgd);

                if (unlikely(next->context.ldt != prev->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                write_pda(mmu_state, TLBSTATE_OK);
                if (read_pda(active_mm) != next)
                        BUG();
                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}

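/*
 * Clear the user %fs selector and the %gs index when a task's mm is
 * released, so no stale segment state is carried over.
 */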
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        asm volatile("movl %0,%%fs"::"r"(0));   \
} while (0)

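/*
 * activate_mm() installs a freshly created mm (the exec path); on x86-64
 * it is just a switch_mm() with no task-specific work.
 */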
#define activate_mm(prev, next)                 \
        switch_mm((prev), (next), NULL)


#endif
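
For orientation, here is a rough sketch of the caller side: how a
scheduler-style context switch might drive enter_lazy_tlb() and switch_mm().
It is illustrative only and not part of this header; the function name
example_switch_to_mm is made up, and reference counting on mm_count as well
as all locking done by the real context_switch() are omitted.

#include <linux/sched.h>
#include <asm/mmu_context.h>

static void example_switch_to_mm(struct task_struct *prev_p,
                                 struct task_struct *next_p)
{
        struct mm_struct *oldmm = prev_p->active_mm;
        struct mm_struct *mm = next_p->mm;

        if (!mm) {
                /* Kernel thread: borrow the previous address space lazily. */
                next_p->active_mm = oldmm;
                enter_lazy_tlb(oldmm, next_p);
        } else {
                /* User task: switch page tables, cpu_vm_mask and LDT. */
                switch_mm(oldmm, mm, next_p);
        }
}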