/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

/*
 * We must allow the BIOS range to be executable:
 */
#define BIOS_BEGIN		0x000a0000
#define BIOS_END		0x00100000

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone after boot.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only: */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
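
/*
 * A minimal sketch of the effect, assuming a lowmem kernel with the BIOS
 * range direct-mapped: even if a caller asks for a non-executable mapping
 * of a BIOS-range page, the forbidden mask filters the NX bit back out:
 *
 *	unsigned long addr = (unsigned long)__va(0xf0000);
 *	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_NX);
 *
 *	prot = static_protections(prot, addr);
 *	WARN_ON(pgprot_val(prot) & _PAGE_NX);
 */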

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
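
/*
 * Usage sketch, assuming "addr" is a valid direct-mapping address: walk
 * down to the pte and see which mapping level backs it. A non-4k level
 * means the address is still covered by a large page, which must go
 * through split_large_page() before a single 4k entry can be changed:
 *
 *	int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level != PG_LEVEL_4K)
 *		... still part of a large mapping ...
 */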

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
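
/*
 * Worked example of the split arithmetic above, assuming 4k pages and a
 * 2M large page (so PTRS_PER_PTE == 512): for a physical address of
 * 0x40321000, LARGE_PAGE_MASK clears the low 21 bits,
 *
 *	addr = 0x40321000 & LARGE_PAGE_MASK;	yields 0x40200000
 *
 * and the loop then recreates the identical mapping as 512 individual
 * 4k ptes covering 0x40200000 .. 0x403ff000.
 */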

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle kernel mapping too which aliases part of
		 * the lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2);
		}
#endif
	}

	return err;
}
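
/*
 * Usage sketch, assuming "addr" is a page in the direct mapping that
 * should temporarily be mapped uncached:
 *
 *	err = change_page_attr_addr(addr, 1, PAGE_KERNEL_NOCACHE);
 *	if (!err)
 *		global_flush_tlb();
 */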

/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 *
 * Modules and drivers should use the set_pages_* APIs instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
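
/*
 * Typical call sequence as described above - change the attribute, then
 * flush. A sketch, with "page" standing for any lowmem page:
 *
 *	err = change_page_attr(page, 1, PAGE_KERNEL_RO);
 *	if (!err)
 *		global_flush_tlb();
 */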

/**
 * change_page_attr_set - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to set (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function differs from change_page_attr() in that only the selected
 * bits are impacted; all other bits remain as they are.
 */
int change_page_attr_set(unsigned long addr, int numpages, pgprot_t prot)
{
	pgprot_t current_prot;
	int level;
	pte_t *pte;

	pte = lookup_address(addr, &level);
	if (pte)
		current_prot = pte_pgprot(*pte);
	else
		pgprot_val(current_prot) = 0;

	pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);

	return change_page_attr_addr(addr, numpages, prot);
}
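
/*
 * Sketch: OR in just the NX bit on one page, leaving the caching bits
 * untouched - this is exactly what set_memory_nx() below expands to:
 *
 *	pgprot_t nx = __pgprot(_PAGE_NX);
 *
 *	err = change_page_attr_set(addr, 1, nx);
 */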

/**
 * change_page_attr_clear - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to clear (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function differs from change_page_attr() in that only the selected
 * bits are impacted; all other bits remain as they are.
 */
int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
{
	pgprot_t current_prot;
	int level;
	pte_t *pte;

	pte = lookup_address(addr, &level);
	if (pte)
		current_prot = pte_pgprot(*pte);
	else
		pgprot_val(current_prot) = 0;

	pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);

	return change_page_attr_addr(addr, numpages, prot);
}
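
/*
 * Sketch: the clear variant removes bits instead - clearing _PAGE_RW
 * here is what set_memory_ro() below expands to:
 *
 *	err = change_page_attr_clear(addr, 1, __pgprot(_PAGE_RW));
 */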

int set_memory_uc(unsigned long addr, int numpages)
{
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_memory_wb);
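
/*
 * These two are meant to be used as a pair. A sketch, assuming "addr"
 * points at numpages of lowmem shared with a non-coherent device:
 *
 *	set_memory_uc(addr, numpages);
 *	global_flush_tlb();
 *	... device uses the buffer uncached ...
 *	set_memory_wb(addr, numpages);
 *	global_flush_tlb();
 */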

int set_memory_x(unsigned long addr, int numpages)
{
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
}
EXPORT_SYMBOL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}
EXPORT_SYMBOL(set_memory_rw);
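
/*
 * Sketch: temporarily write-protect a page to catch stray writes,
 * assuming "addr" is a page-aligned lowmem address:
 *
 *	set_memory_ro(addr, 1);
 *	global_flush_tlb();
 *	...
 *	set_memory_rw(addr, 1);
 *	global_flush_tlb();
 */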

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
}
EXPORT_SYMBOL(set_pages_ro);

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}
EXPORT_SYMBOL(set_pages_rw);
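
/*
 * The set_pages_* variants differ from set_memory_* only in taking a
 * struct page. A sketch with a freshly allocated lowmem page:
 *
 *	struct page *p = alloc_page(GFP_KERNEL);
 *
 *	if (p && !set_pages_uc(p, 1))
 *		global_flush_tlb();
 */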

void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}
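
/*
 * Usage sketch, assuming "buf"/"len" describe a buffer handed to a
 * non-coherent agent: write back/invalidate its cache lines first.
 * Note the loop above issues no memory fence; callers that need
 * ordering around the flush must add it themselves:
 *
 *	clflush_cache_range(buf, len);
 *	mb();
 */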

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only the current cpu:
	 */
	__flush_tlb_all();
}
#endif