#ifndef ASM_X86__MMU_CONTEXT_64_H
#define ASM_X86__MMU_CONTEXT_64_H

#include <asm/pda.h>

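/*
 * Mark this CPU's TLB state for the current mm as lazy.  While a kernel
 * thread runs we keep the old user page tables loaded; if another CPU
 * later flushes this mm, leave_mm() can simply drop this CPU from the
 * flush mask instead of sending it further TLB-flush IPIs.
 */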
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (read_pda(mmu_state) == TLBSTATE_OK)
                write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
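
/*
 * Switch the address space from prev to next.  In the common case the
 * two differ: drop this CPU from prev's flush mask, publish the new
 * state in the PDA, and point CR3 at next's page tables.  prev == next
 * only matters on SMP, where it means this CPU is waking up from lazy
 * TLB mode on the same mm.
 */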
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();
        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
                write_pda(mmu_state, TLBSTATE_OK);
                write_pda(active_mm, next);
#endif
                cpu_set(cpu, next->cpu_vm_mask);
                /* Re-point CR3: flushes all non-global TLB entries */
                load_cr3(next->pgd);
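
                /*
                 * Almost all processes run with the default LDT, so
                 * skip the comparatively expensive load_LDT_nolock()
                 * unless next actually installed its own table.
                 */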
                if (unlikely(next->context.ldt != prev->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
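                /*
                 * prev == next: CR3 already points at the right page
                 * tables, but this CPU may have gone lazy and been
                 * removed from the mm's flush mask in the meantime.
                 */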
                write_pda(mmu_state, TLBSTATE_OK);
                if (read_pda(active_mm) != next)
                        BUG();
                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}
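
/*
 * Used when an mm is given up (e.g. at exec time): zero %fs and the
 * user %gs index so the next user image cannot inherit stale segment
 * selectors from the old one.
 */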
#define deactivate_mm(tsk, mm)                          \
do {                                                    \
        load_gs_index(0);                               \
        asm volatile("movl %0,%%fs"::"r"(0));           \
} while (0)

#endif /* ASM_X86__MMU_CONTEXT_64_H */