#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD	((TASK_SIZE - 1) / PGDIR_SIZE + 1)
#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6	/* shares the dirty bit; see _PAGE_FILE */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4 KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2 MB or 1 GB pages */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/*
 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
 * sign-extended value on 32-bit with all 1's in the upper word,
 * which preserves the upper pte values on 64-bit ptes:
 */
#define _PAGE_PRESENT	(_AC(1, L) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L) << _PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L) << _PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L) << _PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L) << _PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AC(1, L) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AC(1, L) << _PAGE_BIT_PAT_LARGE)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

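/*
 * Editor's sketch of why the _AC(1, L) note above matters (purely
 * illustrative, not part of the original header).  Under 32-bit PAE a
 * pteval_t is 64 bits wide while "long" is 32 bits, so:
 *
 *	pteval_t val = pte_val(pte);
 *	val &= ~_PAGE_RW;	// ~(1L << 1) sign-extends to
 *				// 0xfffffffffffffffd, so bits 32-63,
 *				// including _PAGE_NX, survive the mask
 *
 * Had the flags used _AC(1, UL), the inverted 32-bit mask would
 * zero-extend and silently clear the whole upper word.
 */
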
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset: swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					 * pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

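/*
 * Editor's note (assumes the PAT MSR has been programmed to the layout
 * this header expects; not part of the original file): the PWT/PCD
 * combinations above select the memory type roughly as follows:
 *
 *	PCD=0 PWT=0	_PAGE_CACHE_WB		write-back (the default)
 *	PCD=0 PWT=1	_PAGE_CACHE_WC		write-combining via PAT
 *	PCD=1 PWT=0	_PAGE_CACHE_UC_MINUS	uncached, MTRR can override
 *	PCD=1 PWT=1	_PAGE_CACHE_UC		strongly uncached
 */
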
#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL	(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL	(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)			__pgprot((x))
#else
# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
#endif

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

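/*
 * Editor's sketch: generic mm code (mm/mmap.c in the same tree) gathers
 * these into protection_map[16] and indexes it with the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vma->vm_flags:
 *
 *	pgprot_t protection_map[16] = {
 *		__P000, __P001, __P010, __P011,
 *		__P100, __P101, __P110, __P111,
 *		__S000, __S001, __S010, __S011,
 *		__S100, __S101, __S110, __S111
 *	};
 *
 * So a MAP_PRIVATE PROT_READ|PROT_WRITE mapping resolves to __P011 ==
 * PAGE_COPY: the pte only becomes writeable after a copy-on-write fault.
 */
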
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

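/*
 * Editor's sketch (hypothetical caller, not from the original file):
 * instead of allocating and zeroing a fresh frame, a read-only mapping
 * can point at the shared zero page:
 *
 *	pte_t entry = pfn_pte(page_to_pfn(ZERO_PAGE(addr)),
 *			      vma->vm_page_prot);
 */
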
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_val(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_val(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
}

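/*
 * Editor's note: the helpers above are pure functions over pte values;
 * nothing touches the page tables until the caller stores the result.
 * A sketch of the usual pattern (hypothetical call site):
 *
 *	pte_t pte = *ptep;
 *	pte = pte_mkdirty(pte_mkyoung(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */
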
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

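/*
 * Editor's sketch: a typical construction for mapping page frame 'pfn'
 * with kernel permissions (hypothetical call site):
 *
 *	set_pte(ptep, pfn_pte(pfn, PAGE_KERNEL));
 *
 * The & __supported_pte_mask strips bits the CPU cannot accept, most
 * notably _PAGE_NX when the NX feature is absent or disabled.
 */
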
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}

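/*
 * Editor's sketch: protection changes (e.g. the mprotect path) go
 * through pte_modify(); _PAGE_CHG_MASK keeps the pfn plus the
 * accessed/dirty bits, everything else comes from the new pgprot:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	// pfn and A/D preserved
 */
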
#define pte_pgprot(x)	__pgprot(pte_val(x) & (0xfff | _PAGE_NX))

#define canon_pgprot(p)	__pgprot(pgprot_val(p) & __supported_pte_mask)

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a lone pte.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

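/*
 * Editor's sketch (hypothetical caller); 'level' distinguishes 4K from
 * large-page mappings, so the returned pointer must be interpreted
 * accordingly:
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *	if (kpte && pte_present(*kpte))
 *		...
 */
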
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

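/*
 * Editor's note: the "local" variant above may skip the atomic xchg
 * only because its callers know no other user of the mm can reach the
 * PTE.  In the general case a plain read-then-clear such as
 *
 *	pte_t pte = *ptep;
 *	pte_clear(mm, addr, ptep);
 *
 * could lose an accessed/dirty bit the CPU sets between the two steps,
 * which is why ptep_get_and_clear() below builds on the atomic
 * native_ptep_get_and_clear().
 */
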
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

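/*
 * Editor's sketch of the pattern the rules above prescribe for a
 * by-hand PTE change (compare ptep_set_wrprotect() below):
 *
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);	// notify a shadowing hypervisor
 *	flush_tlb_page(vma, addr);	// flush under the same ptl
 */
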
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		*ptep = entry;						\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   (unsigned long *)&(ptep)->pte); \
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */