/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
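/*
 * For a region still mapped by a large page, lookup_address() returns
 * the pmd entry itself cast to pte_t *; callers such as
 * __change_page_attr() below check _PAGE_PSE to tell the cases apart.
 */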
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	/* Fill a full page table: only the target page gets prot, all
	   other entries keep the reference protection. */
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
static void flush_kernel_map(void *address)
{
	/* The clflush path is deliberately disabled (0 &&) pending
	   evidence that it beats a full wbinvd. */
	if (0 && address && cpu_has_clflush) {
		/* is this worth it? */
		int i;
		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (address + i));
	} else
		asm volatile("wbinvd":::"memory");
	if (address)
		__flush_tlb_one(address);
	else
		__flush_tlb_all();
}
static inline void flush_map(unsigned long address)
{
	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
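/*
 * Deferred freeing: page-table pages whose mappings were reverted are
 * queued on df_list so they are only freed after the global TLB flush
 * in global_flush_tlb(), when no CPU can still hold a stale translation
 * through them.
 */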
struct deferred_page {
	struct deferred_page *next;
	struct page *fpage;
	unsigned long address;
};
static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
static inline void save_page(unsigned long address, struct page *fpage)
{
	struct deferred_page *df;

	df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
	if (!df) {
		/* Out of memory: flush and free immediately. */
		flush_map(address);
		__free_page(fpage);
	} else {
		df->next = df_list;
		df->fpage = fpage;
		df->address = address;
		df_list = df;
	}
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pgprot_val(ref_prot) |= _PAGE_PSE;
	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
	set_pte((pte_t *)pmd, large_pte);
}
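/*
 * revert_page() works by setting _PAGE_PSE in ref_prot and writing the
 * entry at the pmd level, which turns the whole aligned 2/4MB region
 * back into a single large TLB entry; the now-unused split page table
 * is queued for freeing separately via save_page().
 */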
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	unsigned kpte_flags;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	kpte_flags = pte_val(*kpte);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if ((kpte_flags & _PAGE_PSE) == 0) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split = split_large_page(address, prot,
							      ref_prot);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		get_page(kpte_page);
	} else if ((kpte_flags & _PAGE_PSE) == 0) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		__put_page(kpte_page);
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	BUG_ON(PageReserved(kpte_page));

	switch (page_count(kpte_page)) {
	case 1:
		save_page(address, kpte_page);
		revert_page(address, ref_prot);
		break;
	case 0:
		BUG(); /* memleak and failed 2M page regeneration */
	}
	return 0;
}
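/*
 * The page count of kpte_page doubles as a reference count: each 4k pte
 * in a split large-page region that deviates from ref_prot takes a
 * reference (get_page() above), and each pte restored to ref_prot drops
 * one (__put_page()). Once the count falls back to 1, no special
 * protections remain and revert_page() reinstates the large page.
 */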
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
		if (err)
			break;
		/* Handle the kernel text mapping too, which aliases part of
		 * lowmem. Keep it executable: only clear _PAGE_NX. */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2 = prot;

			addr2 = __START_KERNEL_map + __pa(address);
			pgprot_val(prot2) &= ~_PAGE_NX;
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
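/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * a driver that needs one page of the linear map uncached would do
 * something like
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 * and later restore the default write-back attribute with
 *
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 *
 * (PAGE_KERNEL_NOCACHE is assumed to be provided by the architecture's
 * pgtable.h.)
 */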
void global_flush_tlb(void)
{
	struct deferred_page *df, *next_df;

	down_read(&init_mm.mmap_sem);
	/* xchg detaches the whole list atomically, so concurrent
	   flushers race harmlessly for it. */
	df = xchg(&df_list, NULL);
	up_read(&init_mm.mmap_sem);
	/* A single deferred page can be flushed by address; otherwise
	   flush everything. */
	flush_map((df && !df->next) ? df->address : 0);
	for (; df; df = next_df) {
		next_df = df->next;
		if (df->fpage)
			__free_page(df->fpage);
		kfree(df);
	}
}
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);