[linux-2.6] arch/x86/mm/pageattr_64.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

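/*
 * Look up the kernel page-table entry mapping @address.  Returns the
 * pte, the pmd cast to a pte for a large (2/4MB) mapping, or NULL if
 * nothing usable is mapped there.
 */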
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

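/*
 * Split one large (2/4MB) kernel mapping into a page of 512 4k ptes.
 * The pte covering @address gets @prot; all others get @ref_prot.
 */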
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

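/*
 * Flush @size bytes at @adr from the CPU caches, one cache line at a
 * time, using clflush.
 */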
void clflush_cache_range(void *adr, int size)
{
        int i;
        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(adr+i);
}

static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /* When clflush is available always use it because it is
           much cheaper than WBINVD. */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush)
                asm volatile("wbinvd" ::: "memory");
        else list_for_each_entry(pg, l, lru) {
                void *adr = page_address(pg);
                clflush_cache_range(adr, PAGE_SIZE);
        }
        __flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

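/*
 * Queue a page-table page for flushing and possible freeing at the
 * next global_flush_tlb().  PG_arch_1 marks pages already queued.
 */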
static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;
        unsigned long pfn;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
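        /* By now the pmd must point at a split (4k) page table. */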
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}

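/*
 * Apply @prot to the pte mapping @address, splitting the covering
 * large page first if necessary.  page_private() of the page-table
 * page counts the ptes with non-standard attributes; when it drops
 * back to zero the whole 2/4MB mapping is reverted.
 */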
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;
                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
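                        /*
                         * Clear NX at the pmd level so that executability
                         * is controlled by the individual 4k ptes below.
                         */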
                        pgprot_val(ref_prot2) &= ~_PAGE_NX;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        int i;

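        /*
         * Addresses in the high kernel text mapping alias the direct
         * mapping; convert to the direct-mapping address here and fix
         * up the text alias inside the loop below.
         */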
        if (address >= __START_KERNEL_map
            && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                        if (err)
                                break;
                }
                /* Handle the kernel text mapping too, which aliases
                 * part of lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;
                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);
        return change_page_attr_addr(addr, numpages, prot);
}
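
/*
 * Typical usage - for example, to map one page uncached:
 *
 *      change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *      global_flush_tlb();
 *
 * and change_page_attr(page, 1, PAGE_KERNEL) followed by another
 * global_flush_tlb() to restore it.
 */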

void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        /*
         * Write-protect the semaphore, to exclude two contexts
         * doing a list_replace_init() call in parallel and to
         * exclude new additions to the deferred_pages list:
         */
        down_write(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_write(&init_mm.mmap_sem);

        flush_map(&l);

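        /*
         * Page-table pages whose private count dropped to zero were
         * reverted to a large mapping; free them now.  Pages that
         * still carry non-standard ptes stay allocated.
         */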
        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);