/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000
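
/*
 * Note (explanatory, not in the original file): 0xa0000-0xfffff is the
 * legacy VGA/option-ROM/BIOS window below 1MB.  It is always covered by
 * the kernel's direct mapping, which is why __ioremap() below simply
 * returns phys_to_virt() for addresses in this range instead of building
 * a new mapping.
 */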

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;

	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page, because
		 * the physical address can be in a hole between nodes and
		 * then has no memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages,
					    __pgprot(__PAGE_KERNEL | flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}
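
/*
 * Why the fix-up above matters (illustrative note, not from the original
 * sources): if a caller maps RAM that is covered by the direct mapping
 * with conflicting attributes, e.g.
 *
 *	void __iomem *p = __ioremap(phys, size, _PAGE_PCD);
 *
 * the same physical page becomes reachable through one uncached and one
 * cached virtual address.  Conflicting memory types on aliased mappings
 * are undefined on x86 and can corrupt data, so change_page_attr_addr()
 * rewrites the direct-mapping PTEs to carry the same attribute first.
 */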

/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
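
	/*
	 * Worked example (illustrative values): for phys_addr = 0xfebc1004
	 * and size = 8 with 4K pages, last_addr is 0xfebc100b, so offset
	 * becomes 0x004, phys_addr is rounded down to 0xfebc1000 and size
	 * becomes PAGE_ALIGN(0xfebc100c) - 0xfebc1000 = 0x1000: one full
	 * page covering the requested byte range.
	 */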

	/*
	 * Ok, go for it.. Stash the caching flags in the upper bits of
	 * area->flags so iounmap() can tell whether the direct mapping
	 * needs to be reset.
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		/* Clear the stashed flags so iounmap() skips the reset. */
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses, so in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}
EXPORT_SYMBOL(ioremap_nocache);
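
/*
 * Typical (hypothetical) driver usage of the interface above; pdev and
 * MY_CTRL_REG are made-up names for illustration.  The mapping must only
 * be accessed through the mmio helpers (readl/writel and friends), never
 * by plain dereference:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_CTRL_REG);
 *	iounmap(regs);
 */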

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
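
/*
 * Note on the single-unmap rule (explanatory, not from the original
 * file): the vmlist walk in iounmap() is done under a read lock only, so
 * two unmaps of the same pointer can both find the area and race in
 * remove_vm_area().  A simple defensive habit in callers is to clear the
 * pointer right after unmapping:
 *
 *	iounmap(regs);
 *	regs = NULL;
 */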