#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading, and the pte lock.
 *
 * Note: the old pte is known to not be writable, so we don't need to
 * worry about dirty bits etc getting lost.
 */
#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
	flush_tlb_page(__vma, __address);				\
} while (0)
#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_atomic(__ptep, __entry);				\
	flush_tlb_page(__vma, __address);				\
} while (0)
#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif

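/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * a COW-style fault handler swapping in a new pte with ptep_establish().
 * "example_establish" and its locals are hypothetical names; the caller
 * is assumed to hold the pte lock and mm->mmap_sem for reading, per the
 * comment above.
 */
#if 0
static void example_establish(struct vm_area_struct *vma,
			      unsigned long address, pte_t *ptep,
			      pte_t new_entry)
{
	/* Write the new pte, then invalidate the stale TLB entry. */
	ptep_establish(vma, address, ptep, new_entry);
}
#endif
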
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									  \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
	flush_tlb_page(__vma, __address);				  \
} while (0)
#endif

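/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a write-fault fast path in the spirit of do_wp_page(), upgrading a
 * present pte in place.  Because only access bits change, and only
 * towards "more permissive", no pte clear is needed first.
 */
#if 0
static void example_mkwrite(struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep)
{
	pte_t entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(*ptep)));

	ptep_set_access_flags(vma, address, ptep, entry, 1);
}
#endif
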
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif

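/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * reclaim-style sampling of the accessed bit, roughly what
 * page_referenced_one() in mm/rmap.c does.  No TLB flush happens
 * here, so a stale "young" TLB entry may linger; that is acceptable
 * for reference sampling.
 */
#if 0
static int example_harvest_young(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep)
{
	/* Atomically read and clear the accessed bit. */
	return ptep_test_and_clear_young(vma, address, ptep);
}
#endif
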
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *__ptep;						\
	int r = 1;							\
	if (!pte_dirty(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
			   pte_mkclean(__pte));				\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty;							\
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
	if (__dirty)							\
		flush_tlb_page(__vma, __address);			\
	__dirty;							\
})
#endif

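/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * harvesting the dirty bit for writeback.  The flushing variant is
 * used so a concurrent writer faults and re-dirties the pte instead
 * of writing through a stale writable+dirty TLB entry.
 */
#if 0
static int example_harvest_dirty(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep)
{
	return ptep_clear_flush_dirty(vma, address, ptep);
}
#endif
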
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif

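/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * an unmap path in the spirit of try_to_unmap_one() in mm/rmap.c -
 * grab and clear the pte, flush, then propagate the dirty state to
 * the struct page.  ptep_clear_flush() below bundles the same
 * get-and-clear + flush pair.
 */
#if 0
static void example_unmap_one(struct vm_area_struct *vma,
			      unsigned long address, pte_t *ptep)
{
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, address, ptep);

	flush_tlb_page(vma, address);
	if (pte_dirty(pteval))
		set_page_dirty(pte_page(pteval));
}
#endif
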
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTE_CLEAR_FULL
#define pte_clear_full(__mm, __address, __ptep, __full)			\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

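/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * fork-time COW setup as in copy_page_range() - write-protect each
 * shared pte; no per-pte TLB flush is done, the caller flushes the
 * whole mm afterwards.
 */
#if 0
static void example_cow_protect(struct mm_struct *mm,
				unsigned long address, pte_t *ptep)
{
	ptep_set_wrprotect(mm, address, ptep);
}
#endif
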
#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(page) (0)
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

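/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the canonical walk loop these macros are built for, at pgd level.
 * The "- 1" in the comparison keeps the clamp correct even when the
 * rounded-up boundary wraps to 0 at the top of the address space.
 */
#if 0
static void example_walk_pgd_range(struct mm_struct *mm,
				   unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		/* Clamp to the next pgd boundary, or to "end". */
		next = pgd_addr_end(addr, end);
		/* ... operate on the pud range [addr, next) ... */
	} while (pgd++, addr = next, addr != end);
}
#endif
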
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

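/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * how the helpers above slot into a walk loop - skip empty entries,
 * and report-then-clear corrupt ones so the walk continues safely.
 */
#if 0
static void example_walk_pmd_range(pud_t *pud,
				   unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		/* ... operate on the pte range [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}
#endif
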
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */