#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/mmu_context.h>

/* TSB flush operations. */

struct mmu_gather;

extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct mmu_gather *mp);
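/* Background: the TSB (Translation Storage Buffer) is a memory-resident
 * table that the sparc64 TLB miss handlers consult before falling back to
 * the page tables, so stale TSB entries must be invalidated alongside the
 * hardware TLB whenever translations change.
 */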
/* TLB flush operations. */

extern void flush_tlb_pending(void);
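/* User-space TLB flushes on sparc64 are batched: flush_tlb_pending() drains
 * the per-cpu queue of demaps built up by the TLB batching (mmu_gather)
 * code, and the flush_tlb_{range,page,mm} macros below all funnel into it.
 */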
#define flush_tlb_range(vma,start,end) \
	do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_page(vma,addr)	flush_tlb_pending()
#define flush_tlb_mm(mm)		flush_tlb_pending()
/* Local cpu only. */
extern void __flush_tlb_all(void);

extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP

#define flush_tlb_kernel_range(start,end) \
do {	flush_tsb_kernel_range(start,end); \
	__flush_tlb_kernel_range(start,end); \
} while (0)

#else /* CONFIG_SMP */

extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb_kernel_range(start, end) \
do {	flush_tsb_kernel_range(start,end); \
	smp_flush_tlb_kernel_range(start, end); \
} while (0)

#endif /* ! CONFIG_SMP */
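/* Usage sketch (hypothetical caller: 'addr' and 'size' are illustrative
 * names for a region of kernel virtual memory whose mappings changed):
 *
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * Both variants above invalidate the matching TSB entries first and then
 * shoot down the TLB entries, locally or via SMP cross-call.
 */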
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* We don't use virtual page tables for TLB miss processing
	 * any more.  Nowadays we use the TSB.
	 */
}

#endif /* _SPARC64_TLBFLUSH_H */