/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);
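
/* A minimal sketch of what get_new_mmu_context() does; the real
 * implementation (arch/sparc64/mm/init.c in kernels of this vintage)
 * also handles version-counter wrap by invalidating everything:
 *
 *	spin_lock(&ctx_alloc_lock);
 *	ctx = next zero bit in mmu_context_bmap above the context
 *	      number currently held in tlb_context_cache;
 *	if (no free bit)
 *		bump the version field in tlb_context_cache, which
 *		makes CTX_VALID() fail for every old context, and
 *		restart the search from the bottom;
 *	mark ctx in-use in mmu_context_bmap;
 *	mm->context.sparc64_ctx_val = version | ctx;
 *	spin_unlock(&ctx_alloc_lock);
 */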

/* Initialize a new mmu context.  This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 * This just needs to set mm->context to an invalid context.
 */
#define init_new_context(__tsk, __mm)	\
	(((__mm)->context.sparc64_ctx_val = 0UL), 0)
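
/* Why 0UL counts as "invalid": CTX_VALID() (defined with the other
 * CTX_* macros for this port) compares the version bits of the
 * context value against the global allocator state, roughly:
 *
 *	CTX_VALID(ctx) :=
 *		!((ctx.sparc64_ctx_val ^ tlb_context_cache)
 *		  & CTX_VERSION_MASK)
 *
 * and the version counter starts above zero, so a zeroed context can
 * never match and the first switch_mm()/activate_mm() on this mm is
 * forced through get_new_mmu_context().
 */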

/* Destroy a dead context.  This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed.  Our job is to destroy
 * any remaining processor-specific state, and in the sparc64
 * case this just means freeing up the mmu context ID held by
 * this task if valid.
 */
#define destroy_context(__mm)					\
do {	spin_lock(&ctx_alloc_lock);				\
	if (CTX_VALID((__mm)->context)) {			\
		unsigned long nr = CTX_NRBITS((__mm)->context);	\
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));	\
	}							\
	spin_unlock(&ctx_alloc_lock);				\
} while(0)
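
/* Worked example of the bitmap arithmetic above: for context number
 * nr = 200, nr>>6 = 3 selects the fourth 64-bit word of
 * mmu_context_bmap, and nr & 63 = 8 selects bit 8 within that word,
 * i.e. exactly the bit the allocator set when it handed out
 * context 200.
 */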

/* Reload the two core values used by TLB miss handler
 * processing on sparc64.  They are:
 * 1) The physical address of mm->pgd, when full page
 *    table walks are necessary, this is where the
 *    search begins.
 * 2) A "PGD cache".  For 32-bit tasks only pgd[0] is
 *    ever used since that maps the entire low 4GB
 *    completely.  To speed up TLB miss processing we
 *    make this value available to the handlers.  This
 *    decreases the amount of memory traffic incurred.
 */
#define reload_tlbmiss_state(__tsk, __mm) \
do { \
	register unsigned long paddr asm("o5"); \
	register unsigned long pgd_cache asm("o4"); \
	paddr = __pa((__mm)->pgd); \
	pgd_cache = 0UL; \
	if ((__tsk)->thread_info->flags & _TIF_32BIT) \
		pgd_cache = get_pgd_cache((__mm)->pgd); \
	__asm__ __volatile__("wrpr	%%g0, 0x494, %%pstate\n\t" \
			     "mov	%3, %%g4\n\t" \
			     "mov	%0, %%g7\n\t" \
			     "stxa	%1, [%%g4] %2\n\t" \
			     "membar	#Sync\n\t" \
			     "wrpr	%%g0, 0x096, %%pstate" \
			     : /* no outputs */ \
			     : "r" (paddr), "r" (pgd_cache), \
			       "i" (ASI_DMMU), "i" (TSB_REG)); \
} while(0)
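
/* Decoding the two magic PSTATE constants above, assuming the
 * <asm/pstate.h> bit values of this era:
 *
 *	0x494 = PSTATE_MG | PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV
 *		(interrupts off, MMU-global register set selected, so
 *		the %g4/%g7 written here are the copies the TLB miss
 *		handlers actually consume: %g7 caches the pgd physical
 *		address, and the stxa seeds the pgd cache in TSB_REG)
 *	0x096 = PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE
 *		(back on the normal kernel globals, interrupts on)
 */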

/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t" \
			     "flush	%%g6" \
			     : /* No outputs */ \
			     : "r" (CTX_HWBITS((__mm)->context)), \
			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))
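
/* A note on the "flush %%g6" above (our reading, not spelled out in
 * the original): FLUSH is what UltraSPARC requires to synchronize
 * the instruction stream after an stxa to an MMU register, and %g6
 * is simply a register known to hold a valid, always-mapped kernel
 * address to flush against.  Callers that need a clean TLB pair this
 * with __flush_tlb_mm(), as activate_mm() below shows.
 */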

extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid;

	spin_lock(&mm->page_table_lock);
	if (CTX_VALID(mm->context))
		ctx_valid = 1;
	else
		ctx_valid = 0;

	if (!ctx_valid || (old_mm != mm)) {
		if (!ctx_valid)
			get_new_mmu_context(mm);

		load_secondary_context(mm);
		reload_tlbmiss_state(tsk, mm);
	}

	{
		int cpu = smp_processor_id();

		/* Even if (mm == old_mm) we _must_ check
		 * the cpu_vm_mask.  If we do not we could
		 * corrupt the TLB state because of how
		 * smp_flush_tlb_{page,range,mm} on sparc64
		 * and lazy tlb switches work. -DaveM
		 */
		if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
			cpu_set(cpu, mm->cpu_vm_mask);
			__flush_tlb_mm(CTX_HWBITS(mm->context),
				       SECONDARY_CONTEXT);
		}
	}
	spin_unlock(&mm->page_table_lock);
}
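
/* Illustration of the race the cpu_vm_mask test closes: the SMP TLB
 * flush paths (smp_flush_tlb_page/range/mm) only cross-call CPUs
 * currently set in mm->cpu_vm_mask.  While this CPU was absent from
 * the mask, any such flushes skipped it, so its TLB may still hold
 * stale entries tagged with this context; the local __flush_tlb_mm()
 * above discards them before user code can run under them.
 */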

#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	int cpu;

	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	reload_tlbmiss_state(current, mm);
}
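
/* Contrast with switch_mm(): activate_mm() installs a fresh address
 * space (the exec path), so there is no old-context state worth
 * testing for; it unconditionally loads the secondary context,
 * flushes it, and reloads the TLB miss state for 'current'.
 */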

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */