#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>
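
/*
 * Without CONFIG_PARAVIRT the generic no-op mm hooks apply, and the
 * activate hook below collapses to an empty stub.
 */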
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
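
/*
 * Called when the scheduler runs a kernel thread on a borrowed mm:
 * mark this CPU's TLB state lazy, so the first flush IPI to arrive
 * makes leave_mm() drop us from the mm's cpu_vm_mask instead of
 * flushing, after which further flush IPIs are skipped.
 */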
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
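
/*
 * Switch this CPU from address space 'prev' to 'next'.  Two cases: a
 * real switch (prev != next) reloads CR3 and, if needed, the LDT;
 * re-activating the mm we were lazily running on only has to undo a
 * racing leave_mm(), reloading CR3 iff this CPU had been dropped from
 * next->cpu_vm_mask in the meantime.
 */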
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
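
/*
 * Install a new user address space at exec time; exec_mmap() calls
 * this after switching tsk->mm to the fresh mm.  The paravirt hook
 * lets hypervisor backends (e.g. Xen) pin the new page tables first.
 */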
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
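
/*
 * Reset the user TLS segment registers at exec so the new image does
 * not inherit stale selectors: %gs on 32-bit, %fs and the user %gs
 * selector on 64-bit (where %gs carries the kernel per-cpu base).
 */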
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

#endif /* _ASM_X86_MMU_CONTEXT_H */