/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
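
/*
 * Commentary (added, not functional): __phys_addr() distinguishes two
 * virtual ranges.  Addresses at or above __START_KERNEL_map point into the
 * kernel image mapping and are translated relative to __START_KERNEL_map
 * plus phys_base (non-zero when the kernel is relocated, e.g. for kdump);
 * everything else is assumed to be in the direct mapping and is translated
 * by subtracting PAGE_OFFSET.
 */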

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory; this is a BIOS-owned
	 * area, not kernel RAM, but generally not listed as such in the
	 * E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
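
/*
 * Worked example for the loop above (illustrative, assuming a typical e820
 * layout): an E820_RAM entry covering [0x100000, 0x7fee0000) yields
 * addr = 0x100 and end = 0x7fee0, so pfns 0x100..0x7fedf are reported as
 * RAM.  Rounding the start up and truncating the end means pages only
 * partially covered by a region are not treated as RAM; pfns in the legacy
 * BIOS hole (640k-1MB) are rejected before the loop is ever reached.
 */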

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < last_addr; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		remove_vm_area((void *)(vaddr & PAGE_MASK));
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
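
/*
 * Worked example for the alignment logic above (hypothetical numbers): for
 * phys_addr = 0xfebc1234 and size = 8, last_addr = 0xfebc123b, offset
 * becomes 0x234, phys_addr is rounded down to 0xfebc1000 and size becomes
 * PAGE_ALIGN(0xfebc123c) - 0xfebc1000 = 0x1000, i.e. one page.  The caller
 * gets back vaddr + 0x234, pointing at the byte originally requested.
 */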

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
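
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver mapping one page of a device's MMIO region uncached, reading and
 * updating a made-up control register, then dropping the mapping.  The
 * base address and register offset are assumptions for the example.
 */
#if 0
static int example_toggle_ctrl_bit(resource_size_t mmio_phys)
{
	void __iomem *regs = ioremap_nocache(mmio_phys, PAGE_SIZE);
	u32 ctrl;

	if (!regs)
		return -ENOMEM;

	ctrl = readl(regs + 0x10);		/* hypothetical CTRL register */
	writel(ctrl | 0x1, regs + 0x10);	/* set a hypothetical enable bit */
	iounmap(regs);				/* every ioremap needs an iounmap */

	return 0;
}
#endif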

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel.  Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it.  cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
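
/*
 * Usage sketch (illustrative, not part of this file): during early boot,
 * before the normal ioremap machinery is available, firmware tables can be
 * inspected through a temporary fixmap-backed mapping.  The physical
 * address and length below are placeholders for the example.
 */
#if 0
static void __init example_peek_firmware_table(unsigned long table_phys)
{
	char *p = early_ioremap(table_phys, 64);	/* map 64 bytes */

	if (p) {
		/* read whatever is needed from the table here ... */
		early_iounmap(p, 64);	/* always balance with early_iounmap */
	}
}
#endif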

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */