/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	/* A large (2MB) mapping has no pte level; return the pmd itself. */
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		/* The target page gets the new prot, all others keep ref_prot. */
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
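/*
 * Summary of the bookkeeping used below: every entry of a split pte page
 * that carries a non-standard protection bumps page_private() of that page
 * in __change_page_attr(), and changing an entry back to ref_prot drops it
 * again.  When the count reaches zero, revert_page() reinstalls the single
 * large mapping and save_page() queues the now-unused pte page so that
 * global_flush_tlb() can free it after the flush.
 */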
static void flush_kernel_map(void *address)
{
	if (0 && address && cpu_has_clflush) {
		/* is this worth it? */
		int i;

		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (address + i));
	} else
		asm volatile("wbinvd":::"memory");
	if (address)
		__flush_tlb_one(address);
	else
		__flush_tlb_all();
}
static inline void flush_map(unsigned long address)
{
	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}

static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
static inline void save_page(struct page *fpage)
{
	fpage->lru.next = (struct list_head *)deferred_pages;
	deferred_pages = fpage;
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pgprot_val(ref_prot) |= _PAGE_PSE;
	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
	set_pte((pte_t *)pmd, large_pte);
}
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	unsigned long kpte_flags;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	kpte_flags = pte_val(*kpte);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if ((kpte_flags & _PAGE_PSE) == 0) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;

			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
					     & ~(1 << _PAGE_BIT_PSE));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((kpte_flags & _PAGE_PSE) == 0) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	BUG_ON(PageReserved(kpte_page));

	if (page_private(kpte_page) == 0) {
		save_page(kpte_page);
		revert_page(address, ref_prot);
	}
	return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
		if (err)
			break;
		/* Handle kernel mapping too which aliases part of
		 * the lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2 = prot;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Keep the kernel text alias executable. */
			pgprot_val(prot2) &= ~_PAGE_NX;
			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
void global_flush_tlb(void)
{
	struct page *dpage;

	down_read(&init_mm.mmap_sem);
	dpage = xchg(&deferred_pages, NULL);
	up_read(&init_mm.mmap_sem);

	/* Flush a single address if only one page was deferred, else everything. */
	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
	while (dpage) {
		struct page *tmp = dpage;

		dpage = (struct page *)dpage->lru.next;
		ClearPagePrivate(tmp);
		__free_page(tmp);
	}
}
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
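
/*
 * Illustrative sketch of the calling convention documented above
 * change_page_attr_addr(): change the attribute, then issue one
 * global_flush_tlb().  The example_* functions are hypothetical callers, not
 * part of this interface; PAGE_KERNEL_NOCACHE and PAGE_KERNEL are the usual
 * x86-64 protection values a driver would pass here.
 */
static int example_set_page_uncached(struct page *page)
{
	int err;

	/* Remap one page uncached in the kernel linear mapping. */
	err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	if (err)
		return err;
	/* Required by the API: flush TLBs (and caches) on all CPUs. */
	global_flush_tlb();
	return 0;
}

static void example_restore_page(struct page *page)
{
	/*
	 * Going back to the default protection lets __change_page_attr()
	 * drop the page_private count and eventually merge the 2/4MB area
	 * back into a large page.
	 */
	change_page_attr(page, 1, PAGE_KERNEL);
	global_flush_tlb();
}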