#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

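/*
 * enter_lazy_tlb() is called when the kernel starts running in a
 * context with no user address space of its own (e.g. a kernel
 * thread).  On SMP it downgrades this CPU's TLB state to "lazy", so
 * the TLB-flush IPI handler knows it may detach from the mm via
 * leave_mm() instead of servicing a flush on every IPI.
 */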
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        unsigned cpu = smp_processor_id();

        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

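/*
 * leave_mm() (defined in the arch SMP code) detaches a lazily held mm
 * from the given CPU: the CPU drops out of the mm's cpu_vm_mask, so no
 * further TLB-flush IPIs are sent to it for that mm.
 */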
void leave_mm(unsigned long cpu);

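/*
 * switch_mm() switches this CPU to a new address space.  Two cases:
 *
 *  - prev != next: a real switch.  Stop flush IPIs for the old mm,
 *    publish next as this CPU's active_mm, reload %cr3 and, if it
 *    differs, the LDT.
 *
 *  - prev == next (SMP only): re-entering an mm this CPU held lazily.
 *    If leave_mm() removed us from cpu_vm_mask in the meantime, the
 *    flush IPIs were not delivered here, so %cr3 and the LDT must be
 *    reloaded to pick up any page-table changes that were missed.
 */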
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables */
                load_cr3(next->pgd);

                /*
                 * Load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery.  We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}

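/*
 * deactivate_mm() is used on the exec path: it zeroes the user %gs
 * selector so no stale TLS segment from the old image stays loaded.
 * (Assumes the i386 per-CPU/PDA segment lives in %fs here, leaving
 * %gs to user space.)
 */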
#define deactivate_mm(tsk, mm)                  \
        asm("movl %0,%%gs": :"r" (0));

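/*
 * activate_mm() installs an mm for the first time (no task is
 * associated yet, hence the NULL tsk).  The paravirt hook runs first
 * so a hypervisor can prepare the new address space; without
 * CONFIG_PARAVIRT it is the empty stub defined at the top of this
 * file.
 */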
#define activate_mm(prev, next)                         \
        do {                                            \
                paravirt_activate_mm((prev), (next));   \
                switch_mm((prev), (next), NULL);        \
        } while (0)

#endif  /* __I386_SCHED_H */