/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);
bool __virt_addr_valid(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		if (x >= KERNEL_IMAGE_SIZE)
			return false;
		x += phys_base;
	} else {
		if (x < PAGE_OFFSET)
			return false;
		x -= PAGE_OFFSET;
		if (system_state == SYSTEM_BOOTING ?
				x > MAXMEM : !phys_addr_valid(x)) {
			return false;
		}
	}

	return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);
#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
		is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
	if (x < PAGE_OFFSET)
		return false;
	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
		return false;
	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif /* CONFIG_X86_64 */
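
/*
 * Illustrative only: __phys_addr() is what __pa()/virt_to_phys() expand
 * to, and __virt_addr_valid() is its checked counterpart. A hypothetical
 * round-trip sanity check for a directly mapped kernel buffer:
 */
#if 0	/* usage sketch, not built */
static void example_pa_roundtrip(void *kbuf)
{
	unsigned long phys;

	BUG_ON(!__virt_addr_valid((unsigned long)kbuf));
	phys = __pa(kbuf);
	BUG_ON(__va(phys) != kbuf);	/* direct mapping round-trips */
}
#endif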
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory: it is a BIOS-owned
	 * area, not kernel RAM, but generally not listed as such in the
	 * E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_ram_page = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page++;
		else
			not_ram_page++;

		/* Treat a tie between RAM and non-RAM pages as mixed. */
		if (ram_page == not_ram_page)
			return -1;
	}

	return ram_page;
}
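
/*
 * Illustrative only: a hypothetical caller that treats a range as RAM
 * only when every page in it is RAM. pagerange_is_ram() returns -1 for
 * a mixed range, otherwise the number of RAM pages it saw.
 */
#if 0	/* usage sketch, not built */
static int example_range_is_all_ram(unsigned long start, unsigned long end)
{
	int nr_ram = pagerange_is_ram(start, end);
	int nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT);

	return nr_ram >= 0 && nr_ram == nr_pages;
}
#endif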
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
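
/*
 * Illustrative only: a minimal sketch of how a PCI driver might use
 * ioremap_nocache() and iounmap(). The device, BAR number and register
 * offset are hypothetical, and the usual pci_enable_device()/request-
 * region steps are omitted.
 */
#if 0	/* usage sketch, not built */
static int example_pci_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 status;

	/* Map BAR 0 uncached; MMIO registers must not be cached. */
	regs = ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x10);		/* hypothetical status register */
	writel(status | 0x1, regs + 0x10);

	iounmap(regs);
	return 0;
}
#endif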
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
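
/*
 * Illustrative only: write combining suits framebuffer-like memory where
 * streaming write bandwidth matters more than the ordering of individual
 * stores. A hypothetical framebuffer clear:
 */
#if 0	/* usage sketch, not built */
static void __iomem *example_map_and_clear_fb(resource_size_t fb_base,
					      unsigned long fb_len)
{
	void __iomem *fb = ioremap_wc(fb_base, fb_len);

	if (fb)
		memset_io(fb, 0, fb_len);	/* WC batches these stores */
	return fb;
}
#endif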
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
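
/*
 * Illustrative only: ioremap_prot() keeps just the cache-attribute bits
 * of the value it is given, so a hypothetical write-back mapping could
 * equally be requested through it:
 */
#if 0	/* usage sketch, not built */
static void __iomem *example_map_writeback(resource_size_t phys,
					   unsigned long size)
{
	/* Equivalent to ioremap_cache(phys, size). */
	return ioremap_prot(phys, size, _PAGE_CACHE_WB);
}
#endif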
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the global
	 * lists until we're done with it. cpa takes care of the direct
	 * mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
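
/*
 * Illustrative only: roughly how a /dev/mem-style reader would use the
 * xlate/unxlate pair (the real user is drivers/char/mem.c; simplified,
 * and a real caller must stay within the single mapped page):
 */
#if 0	/* usage sketch, not built */
static long example_read_phys_byte(unsigned long phys, u8 *out)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*out = *(u8 *)ptr;
	unxlate_dev_mem_ptr(phys, ptr);
	return 0;
}
#endif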
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);
static void __init __iomem *
__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}
	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot\n",
			 phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
	return prev_map[slot];
}
/* Remap an IO device */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
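
/*
 * Illustrative only: early_ioremap()/early_iounmap() bracket short-lived
 * boot-time accesses, e.g. scanning a firmware table before the normal
 * ioremap machinery exists. The table address here is hypothetical.
 */
#if 0	/* usage sketch, not built */
static u8 __init example_peek_firmware_byte(unsigned long table_phys)
{
	void __iomem *p = early_ioremap(table_phys, PAGE_SIZE);
	u8 val = 0;

	if (p) {
		val = readb(p);		/* must use MMIO accessors */
		early_iounmap(p, PAGE_SIZE);
	}
	return val;
}
#endif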
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}
	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	/* Round up so we clear every page the mapping touched. */
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}