x86: simplify 32-bit cpa largepage splitting
arch/x86/mm/pageattr_32.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 2;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 3;

	return pte_offset_kernel(pmd, address);
}
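
/*
 * Illustrative sketch, not part of the original file: how a caller can
 * use lookup_address() and the returned level. Level 2 means the
 * returned pointer is really the pmd entry of a large page; level 3
 * means a regular 4k pte. The helper name below is made up for this
 * example.
 */
static inline int example_addr_is_large_mapped(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	return pte && level == 2;
}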

static struct page *
split_large_page(unsigned long address, pgprot_t ref_prot)
{
	unsigned long addr;
	struct page *base;
	pte_t *pbase;
	int i;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return NULL;

	/*
	 * Populate the new page table page with 4k ptes that cover the
	 * same physical range as the large page, all using ref_prot.
	 */
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	return base;
}
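
/*
 * Worked example of the arithmetic above (illustrative, non-PAE
 * numbers): with 4k pages and PTRS_PER_PTE == 1024, one large page
 * covers 4MB. If __pa(address) is 0x00523000, the mask rounds it down
 * to addr == 0x00400000, and the loop installs 1024 ptes mapping
 * 0x00400000..0x007fffff in PAGE_SIZE steps, all with ref_prot.
 */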

static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	unsigned long flags;
	struct page *page;

	/* change init_mm */
	set_pte_atomic(kpte, pte);
	if (SHARED_KERNEL_PMD)
		return;

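	/*
	 * When the kernel pmd is not shared between pgds (e.g. under
	 * some paravirt configurations), each pgd carries its own copy
	 * of the kernel mappings, so the update must be replicated
	 * into every pgd on pgd_list.
	 */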
	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pgprot_t ref_prot = PAGE_KERNEL;
	struct page *kpte_page;
	unsigned long address;
	pgprot_t oldprot;
	pte_t *kpte;
	int level;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	oldprot = pte_pgprot(*kpte);
	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON(address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext &&
	       (pgprot_val(prot) & _PAGE_NX));

	if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		ref_prot = PAGE_KERNEL_EXEC;

	ref_prot = canon_pgprot(ref_prot);
	prot = canon_pgprot(prot);

	if (level == 3) {
		set_pte_atomic(kpte, mk_pte(page, prot));
	} else {
		struct page *split;

		split = split_large_page(address, ref_prot);
		if (!split)
			return -ENOMEM;

		/*
		 * There is a small race window here in which a bit of
		 * RAM can be wasted: if another CPU splits the same
		 * large page concurrently, one of the freshly allocated
		 * pte pages ends up unreferenced and is never freed.
		 * The goto below re-walks the tables and then takes the
		 * 4k path.
		 */
		set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
		goto repeat;
	}
	return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a caching policy other
 * than write-back somewhere else - some CPUs do not like it when mappings
 * with different caching policies exist for the same page. This changes
 * the page attributes of the page in the kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings
 * elsewhere; this function only deals with the kernel linear map.
 *
 * The caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0, i;

	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL(change_page_attr);

int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
	int i;
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;

	for (i = 0; i < numpages; i++) {
		if (!pfn_valid(pfn + i)) {
			break;
		} else {
			int level;
			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);

			/*
			 * Sanity check: a linear-map address backed by
			 * a valid pfn must actually be mapped.
			 */
			BUG_ON(pte && pte_none(*pte));
		}
	}
	return change_page_attr(virt_to_page(addr), i, prot);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all TLBs to work around errata in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
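
/*
 * Illustrative usage sketch, not part of the original file: a caller
 * that maps pages uncached pairs change_page_attr() with the required
 * global_flush_tlb(), as the comment above change_page_attr() demands.
 * The function name is made up; PAGE_KERNEL_NOCACHE is just one example
 * of a non write-back protection.
 */
static int __maybe_unused example_set_pages_uncached(struct page *page,
						     int numpages)
{
	int err = change_page_attr(page, numpages, PAGE_KERNEL_NOCACHE);

	if (!err)
		global_flush_tlb();
	return err;
}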

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored - the call cannot fail here,
	 * because large pages are disabled at boot time with
	 * CONFIG_DEBUG_PAGEALLOC, so no large page ever needs splitting.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should send an IPI and flush all TLBs, but that can
	 * deadlock here, so only flush the current CPU's TLB.
	 */
	__flush_tlb_all();
}
#endif