#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)
static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte_low);
	unsigned long long *xp = (unsigned long long *) pteptr;

	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
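/*
 * Illustrative note (not part of the original header): NPHYS is the
 * number of implemented physical address bits, and NPHYS_SIGN/NPHYS_MASK
 * (defined elsewhere by the arch code) pick out the top implemented bit
 * and the unimplemented upper bits respectively, roughly:
 *
 *	NPHYS_SIGN == 1ULL << (NPHYS - 1)
 *	NPHYS_MASK == ~((1ULL << NPHYS) - 1)
 *
 * e.g. assuming NPHYS == 32, a PTE value of 0x80000000 is stored as
 * 0xffffffff80000000, while 0x7fffffff is stored unchanged.
 */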
#define set_pte_at(mm,addr,ptep,pteval)	set_pte(ptep,pteval)
static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}
/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY		0x0
/* A PMD entry is either empty or present; it is never paged out */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))
/* PMD to PTE dereferencing */
#define pte_index(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
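/*
 * Illustrative sketch (not part of the original header): a software walk
 * from an mm and virtual address down to the leaf entry, using the
 * accessors above plus pmd_offset() from the accompanying arch code.
 * lookup_pte() is a hypothetical helper name; error handling is minimal.
 */
#if 0
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top level */
	pmd_t *pmd = pmd_offset(pgd, addr);	/* middle level */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* leaf level */
}
#endif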
#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff
/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill. The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out. Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
   scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces. That is handled by __swp_entry() and __swp_type() below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
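/*
 * For reference: the software-only bits above are _PAGE_PRESENT (0x004),
 * _PAGE_SHARED (0x020), _PAGE_DIRTY (0x400) and _PAGE_ACCESSED (0x800),
 * which sum to 0xc24; inverted over 64 bits that yields the
 * 0xfffffffffffff3dbLL value used for _PAGE_FLAGS_HARDWARE_MASK.
 */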
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#else
/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
# define _PAGE_SZHUGE	(0)
#endif
/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * shared by all the processes.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)
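/*
 * For reference, _KERNPG_TABLE expands to 0xde6 (present | read | write |
 * execute | cachable | accessed | dirty | shared); pmd_bad() above tests
 * that a PMD entry carries exactly these flag bits once the page-frame
 * bits and _PAGE_USER are masked off.
 */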
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/*
 * We have full permissions (Read/Write/Execute/Shared).
 */
#define _PAGE_COMMON	(_PAGE_PRESENT | _PAGE_USER | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_SHARED)
#define PAGE_EXECREAD	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
/*
 * We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack.
 */
#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX	__pgprot(_PAGE_COMMON | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)
#define PAGE_KERNEL_NOCACHE \
		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | _PAGE_ACCESSED | \
			 _PAGE_DIRTY | _PAGE_SHARED)
/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x)	__pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
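/*
 * Illustrative sketch (not part of the original header): a driver mmap()
 * handler would typically apply pgprot_noncached() before remapping
 * device registers into user space, along these lines:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */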
/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t *pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)
/*
 * PTE level access routines.
 *
 * This is the leaf of the tree walk; what gets stored here is the
 * physical address.
 *
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the page
 * is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
 * out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
 * left for us to select. If we force bit[7]==0 when swapped out, we could use
 * the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
 * we force bit[7]==1 when swapped out, we can use all zeroes to indicate
 * empty. This is convenient, because the page tables get cleared to zero
 * when they are allocated.
 */
#define _PTE_EMPTY	0x0
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)
/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to. Get the absolute physical
 * address, make a relative physical address and translate it to an
 * index.
 */
#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
				 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)		(mem_map + pte_pagenr(x))
/*
 * Return the number of MB (rounded down) corresponding to x pages.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
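/*
 * e.g. assuming PAGE_SHIFT == 12 (4kB pages), this is x >> 8:
 * pages_to_mb(0x1800) == 0x18, i.e. 6144 pages == 24MB.
 */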
/*
 * The following only have defined behavior if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
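/*
 * Illustrative sketch (not part of the original header): a fault handler
 * refreshing an existing mapping would typically combine these helpers,
 * e.g. (where "writable" is a hypothetical flag):
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkyoung(entry);
 *	if (writable)
 *		entry = pte_mkdirty(pte_mkwrite(entry));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */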
/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})
/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
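/*
 * Since _PAGE_CHG_MASK keeps the page frame plus the accessed/dirty bits,
 * pte_modify() changes only the protection: e.g. pte_modify(pte,
 * PAGE_READONLY) drops write permission without losing the dirty state.
 */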
/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
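/*
 * Worked example of the split type field described above: for
 * type == 0x2b (binary 101011) and offset == 0x100, __swp_entry()
 * stores type bits [1:0] at PTE bits [1:0], type bits [5:2] at PTE
 * bits [6:3], and the offset from bit 8 upwards:
 *
 *	((0x100 << 8) + ((0x2b & 0x3c) << 1) + (0x2b & 3)) == 0x10053
 *
 * leaving bit 2 (_PAGE_PRESENT) clear; __swp_type()/__swp_offset()
 * invert the transformation.
 */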
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte))
#define pgoff_to_pte(off)	((pte_t) { (off) | _PAGE_FILE })
#endif /* !__ASSEMBLY__ */
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* __ASM_SH_PGTABLE_64_H */