#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
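
/*
 * Illustrative sketch of the fallback the comment above describes, for
 * call sites where the old pte may still be live.  The caller first
 * obtains and clears the entry, then installs the new value; the names
 * old_pte, new_pte, mm, addr and ptep are placeholders for the caller's
 * own context:
 *
 *	old_pte = ptep_get_and_clear(mm, addr, ptep);
 *	... decide what to do based on old_pte ...
 *	set_pte(ptep, new_pte);
 */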

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm,
					  unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
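
/*
 * Worked sequence for the ordering above: the entry in memory passes
 * through (old_low, old_high) -> (0, old_high) -> (0, new_high) ->
 * (new_low, new_high).  Every intermediate state has the present bit
 * clear, so a walk racing with the update sees either the old entry,
 * a not-present entry, or the complete new one.
 */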

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void pud_clear(pud_t *pudp)
{
	unsigned long pgd;

	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Make sure the pud entry we're updating is within the
	 * current pgd to avoid unnecessary TLB flushes.
	 */
	pgd = read_cr3();
	if (__pa(pudp) >= pgd && __pa(pudp) <
	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
		write_cr3(pgd);
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS	32
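
/*
 * Example (offset value is arbitrary): for a nonlinear file mapping at
 * page offset 0x1234, pgoff_to_pte(0x1234) builds an entry whose low
 * half is just _PAGE_FILE (present bit clear) and whose high half holds
 * 0x1234, so pte_to_pgoff() simply reads the offset back out of pte_high.
 */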

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
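
/*
 * Example (values are arbitrary): __swp_entry(3, 0x1000) packs swap type
 * 3 into bits 0-4 and offset 0x1000 into the bits above them, giving
 * val = (0x1000 << 5) | 3.  __swp_type() and __swp_offset() mask and
 * shift those fields back out, and the whole value is stored in the
 * pte_high word of a not-present pte.
 */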

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */