/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else   /* !CONFIG_X86_64: no high physical addresses to reject */

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

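/*
 * Illustrative sketch, compiled out: on 64-bit, __phys_addr() handles both
 * halves of the kernel address space, the kernel-image mapping above
 * __START_KERNEL_map and the direct mapping at PAGE_OFFSET.  The example
 * function below is hypothetical and assumes _text from <asm/sections.h>.
 */
#if 0
static void __init __phys_addr_example(void)
{
        /* Kernel-image address: translated via phys_base. */
        unsigned long text_phys = __phys_addr((unsigned long)_text);
        /* Direct-mapping address: translates straight back to 1MB. */
        unsigned long lowmem_phys = __phys_addr((unsigned long)__va(0x100000));

        BUG_ON(lowmem_phys != 0x100000);
}
#endif
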
int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                        (pfn << PAGE_SHIFT) < last_addr; pfn++) {
                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        if (reserve_memtype(phys_addr, phys_addr + size,
                            prot_val, &new_prot_val)) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                               (unsigned long long)phys_addr,
                               (unsigned long long)(phys_addr + size),
                               prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        area = get_vm_area(size, VM_IOREMAP);
        if (!area) {
                free_memtype(phys_addr, phys_addr + size);
                return NULL;
        }
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

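/*
 * Worked example of the alignment handling above, with hypothetical numbers:
 * for ioremap_nocache(0xfebc1234, 0x100) on 4K pages, last_addr is
 * 0xfebc1333 and offset is 0x234; phys_addr is rounded down to 0xfebc1000
 * and size rounded up to 0x1000, so a single page is mapped and the caller
 * receives vaddr + 0x234, which covers exactly the requested range.
 */
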
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);

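/*
 * Illustrative usage sketch, compiled out: a hypothetical driver mapping its
 * MMIO register window uncached.  The MY_DEV_* constants and my_dev_example()
 * are placeholders, not part of any real device.
 */
#if 0
#define MY_DEV_MMIO_PHYS        0xfebc0000UL    /* hypothetical BAR address */
#define MY_DEV_MMIO_LEN         0x1000UL
#define MY_DEV_CTRL_REG         0x04            /* hypothetical register offset */

static int my_dev_example(void)
{
        void __iomem *regs;

        regs = ioremap_nocache(MY_DEV_MMIO_PHYS, MY_DEV_MMIO_LEN);
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + MY_DEV_CTRL_REG);    /* always use the mmio helpers */
        (void)readl(regs + MY_DEV_CTRL_REG);    /* read back to flush the posted write */

        iounmap(regs);
        return 0;
}
#endif
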
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

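/*
 * Illustrative sketch, compiled out: write-combined mappings suit
 * framebuffer-like apertures where streaming writes dominate.  The
 * FB_APERTURE_* constants are hypothetical; when PAT/WC is unavailable,
 * ioremap_wc() above quietly degrades to an uncached mapping.
 */
#if 0
#define FB_APERTURE_PHYS        0xd0000000UL    /* hypothetical aperture base */
#define FB_APERTURE_LEN         (8UL << 20)

static void __iomem *fb_map_example(void)
{
        return ioremap_wc(FB_APERTURE_PHYS, FB_APERTURE_LEN);
}
#endif
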
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

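/*
 * Illustrative lifecycle sketch, compiled out: well-behaved callers claim the
 * physical range before mapping it and release it after iounmap().  The
 * REGION_* constants and the probe-style function are hypothetical.
 */
#if 0
#define REGION_PHYS     0xfeb00000UL    /* hypothetical */
#define REGION_LEN      0x4000UL

static int region_probe_example(void)
{
        void __iomem *base;

        if (!request_mem_region(REGION_PHYS, REGION_LEN, "example"))
                return -EBUSY;

        base = ioremap_nocache(REGION_PHYS, REGION_LEN);
        if (!base) {
                release_mem_region(REGION_PHYS, REGION_LEN);
                return -ENOMEM;
        }

        /* ... access the device with readl()/writel() on base ... */

        iounmap(base);
        release_mem_region(REGION_PHYS, REGION_LEN);
        return 0;
}
#endif
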
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));
                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(early_ioremap_nested < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

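/*
 * Illustrative sketch, compiled out: early_ioremap()/early_iounmap() serve
 * boot-time code that must peek at firmware data before the normal ioremap
 * machinery is available.  The table address and structure below are
 * hypothetical; boot-time mappings must be strictly nested and short-lived.
 */
#if 0
struct example_fw_table {
        u32 signature;
        u32 length;
};

static void __init early_table_example(unsigned long table_phys)
{
        struct example_fw_table tbl;
        void *map;

        map = early_ioremap(table_phys, sizeof(tbl));
        if (!map)
                return;
        memcpy(&tbl, map, sizeof(tbl));
        early_iounmap(map, sizeof(tbl));
}
#endif
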
void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */