/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

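/*
 * diag10() issues DIAGNOSE 0x10 ("release pages") so that a hypervisor
 * such as z/VM can reclaim the real frame backing the given guest page.
 */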
extern void diag10(unsigned long addr);

/*
 * Page allocation orders.
 */
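/*
 * The orders follow from the hardware table formats: 31 bit mode uses
 * 4 byte table entries, so a full pgd (2048 hardware segment table
 * entries) needs 8KB (order 1); 64 bit mode uses 8 byte entries, so
 * pgd and pmd are 16KB tables (order 2).  A page table always fits
 * into a single 4KB page (order 0).
 */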
#ifndef __s390x__
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	0
# define PGD_ALLOC_ORDER	1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	2
# define PGD_ALLOC_ORDER	2
#endif /* __s390x__ */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

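/*
 * Execute protection is emulated when s390_noexec is set (hardware
 * without page level execute control, booted with the "noexec"
 * parameter): each page table is then allocated together with a
 * shadow copy used for instruction fetches.  The shadow table is
 * linked off the primary table's struct page via page->lru.next;
 * the get_shadow_pgd/pmd/pte/page() helpers read it back from there,
 * and the populate/free routines below keep both copies in sync.
 */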
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
	int i;

	if (!pgd)
		return NULL;
	if (s390_noexec) {
		pgd_t *shadow_pgd = (pgd_t *)
			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
		struct page *page = virt_to_page(pgd);

		if (!shadow_pgd) {
			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pgd;
	}
	for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
		pgd_clear(pgd + i);
#endif
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);

	if (shadow_pgd)
		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)				do { } while (0)
#define __pmd_free_tlb(tlb,x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)		BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
	int i;

	if (!pmd)
		return NULL;
	if (s390_noexec) {
		pmd_t *shadow_pmd = (pmd_t *)
			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
		struct page *page = virt_to_page(pmd);

		if (!shadow_pmd) {
			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pmd;
	}
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline void pmd_free (pmd_t *pmd)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pmd)
		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

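/*
 * Flush the pending mmu_gather batch before the pmd pages (and their
 * shadow) are handed back to the page allocator, so that no deferred
 * TLB operation can still refer to the table being freed.
 */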
#define __pmd_free_tlb(tlb,pmd)			\
	do {					\
		tlb_flush_mmu(tlb, 0, 0);	\
		pmd_free(pmd);			\
	} while (0)

static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

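/*
 * A Linux pmd entry expands to several hardware segment table entries:
 * on 31 bit, four entries, each pointing at 256 four-byte ptes (one
 * 1KB quarter of the pte page); on 64 bit, an entry pair (pmd_val and
 * pmd_val1), each half pointing at 256 eight-byte ptes.  Hence the
 * pte+256/512/768 offsets below.
 */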
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

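/*
 * Note the cast of page_to_phys() to a pte pointer below: the kernel
 * address space is identity mapped on s390 (__pa is a plain cast), so
 * the physical address of the page table is also a valid kernel
 * virtual address.
 */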
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
	int i;

	if (!pte)
		return NULL;
	if (s390_noexec) {
		pte_t *shadow_pte = (pte_t *)
			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		struct page *page = virt_to_page(pte);

		if (!shadow_pte) {
			free_page((unsigned long) pte);
			return NULL;
		}
		page->lru.next = (void *) shadow_pte;
	}
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_clear(mm, vmaddr, pte + i);
		vmaddr += PAGE_SIZE;
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
	if (pte)
		return virt_to_page(pte);
	return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	pte_t *shadow_pte = get_shadow_pte(pte);

	if (shadow_pte)
		free_page((unsigned long) shadow_pte);
	free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
	struct page *shadow_page = get_shadow_page(pte);

	if (shadow_page)
		__free_page(shadow_page);
	__free_page(pte);
}

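/*
 * Statement expression with local copies so that the tlb and pte macro
 * arguments are evaluated exactly once; the shadow page, if present,
 * is queued for freeing along with the primary pte page.
 */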
#define __pte_free_tlb(tlb, pte)					\
({									\
	struct mmu_gather *__tlb = (tlb);				\
	struct page *__pte = (pte);					\
	struct page *shadow_page = get_shadow_page(__pte);		\
	if (shadow_page)						\
		tlb_remove_page(__tlb, shadow_page);			\
	tlb_remove_page(__tlb, __pte);					\
})

#endif /* _S390_PGALLOC_H */