linux-2.6: include/asm-powerpc/pgalloc.h
#ifndef _ASM_POWERPC_PGALLOC_H
#define _ASM_POWERPC_PGALLOC_H

#ifndef CONFIG_PPC64
#include <asm-ppc/pgalloc.h>
#else

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

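/*
 * Slab caches backing the various page table levels, indexed by the
 * *_CACHE_NUM constants below.
 */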
extern kmem_cache_t *pgtable_cache[];

#ifdef CONFIG_PPC_64K_PAGES
#define PTE_CACHE_NUM   0
#define PMD_CACHE_NUM   1
#define PGD_CACHE_NUM   2
#else
#define PTE_CACHE_NUM   0
#define PMD_CACHE_NUM   1
#define PUD_CACHE_NUM   1
#define PGD_CACHE_NUM   0
#endif
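/*
 * Levels that use the same index above share a single slab cache:
 * with 4K pages, PUD tables are allocated from the PMD cache and
 * PGD tables from the PTE cache.
 */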

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}

static inline void pgd_free(pgd_t *pgd)
{
        kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}

#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)      pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
                                GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
        kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))


#else /* CONFIG_PPC_64K_PAGES */

#define pud_populate(mm, pud, pmd)      pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
{
        pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))

#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
                                GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(pmd_t *pmd)
{
        kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
                                GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
{
        pte_t *pte = pte_alloc_one_kernel(mm, address);

        /* Don't hand a failed allocation to virt_to_page() */
        return pte ? virt_to_page(pte) : NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
        kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
}

static inline void pte_free(struct page *ptepage)
{
        pte_free_kernel(page_address(ptepage));
}

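/*
 * pgtable_free_t packs a page table pointer and its pgtable_cache[]
 * index into a single word: the index is stored in the low bits left
 * clear by the table-size mask and is stripped back out again in
 * pgtable_free().
 */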
#define PGF_CACHENUM_MASK       0xf

typedef struct pgtable_free {
        unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
                                                unsigned long mask)
{
        BUG_ON(cachenum > PGF_CACHENUM_MASK);

        return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

static inline void pgtable_free(pgtable_free_t pgf)
{
        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
        int cachenum = pgf.val & PGF_CACHENUM_MASK;

        kmem_cache_free(pgtable_cache[cachenum], p);
}

extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

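/*
 * The __*_free_tlb() hooks hand the packed descriptor to
 * pgtable_free_tlb(), which batches the kmem_cache_free() through the
 * mmu_gather machinery so the table is not reused while stale TLB
 * entries may still reference it.
 */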
#define __pte_free_tlb(tlb, ptepage)    \
        pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
                PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd)        \
        pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
                PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud)        \
        pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
                PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()       do { } while (0)

#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_PGALLOC_H */