#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

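/*
 * On SMP, a flush must reach every CPU that may hold cached translations
 * for the affected mapping, so flush_tlb_all() maps to a cross-CPU variant;
 * on UP, purging the local TLB is sufficient.
 */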
#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif

static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);	/* reload region registers with the mm's new context */
}

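/*
 * Note that no explicit purge instruction is needed in the active case:
 * activate_context() picks up the fresh region ID that gets allocated after
 * flush_tlb_mm() below clears the old one, so translations tagged with the
 * old region ID simply become unreachable.
 */
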
/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	set_bit(mm->context, ia64_ctx.flushmap);	/* mark context stale: flush before its RID is reused */
	mm->context = 0;	/* force allocation of a fresh context on next use */

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}

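/*
 * Illustrative sketch (hypothetical caller, not an actual kernel path): in
 * the fork() case, a parent PTE is first made read-only and the stale
 * writable translations are then dropped, so that the next store faults and
 * copy-on-write can kick in:
 *
 *	set_pte(ptep, pte_wrprotect(*ptep));	- make the PTE read-only
 *	flush_tlb_mm(mm);			- drop cached writable entries
 */
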
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));	/* purge the local TLB entry for this page */
	else
		vma->vm_mm->context = 0;	/* mm not active here: invalidate its context instead */
#endif
}

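/*
 * Illustrative sketch only (not part of this interface): how the ptc.l-based
 * purge above generalizes to a run of pages on the local CPU.
 * "local_flush_tlb_pages" is a hypothetical name, and the mm is assumed to
 * be the one currently active on this CPU.
 */
static inline void
local_flush_tlb_pages (unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));	/* second operand encodes log2(purge size) in bits 7:2 */
	ia64_srlz_i();	/* serialize so the purges take effect before returning */
}
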
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}

#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */

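/*
 * The fallback to a full flush above (note the XXX) is the conservative
 * choice: kernel translations may be cached on any CPU, so until a ranged
 * kernel purge is implemented, everything is flushed.
 */
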
#endif /* _ASM_IA64_TLBFLUSH_H */