/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS   0x100000

/* Fill the PTEs covering [address, address + size) within one PMD. */
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end, pfn;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                        _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t *pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static inline int remap_area_pud(pud_t *pud, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        return -ENOMEM;
                remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address && (address < end));
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                            unsigned long size, unsigned long flags)
{
        int error;
        pgd_t *pgd;
        unsigned long end = address + size;

        phys_addr -= address;
        pgd = pgd_offset_k(address);
        flush_cache_all();
        if (address >= end)
                BUG();
        do {
                pud_t *pud;
                pud = pud_alloc(&init_mm, pgd, address);
                error = -ENOMEM;
                if (!pud)
                        break;
                if (remap_area_pud(pud, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                pgd++;
        } while (address && (address < end));
        flush_tlb_all();
        return error;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                    unsigned long flags)
{
        int err = 0;
        if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
                unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vaddr = (unsigned long) __va(phys_addr);

                /*
                 * Must use an address here and not a struct page because the
                 * phys addr can be in a hole between nodes and not have a
                 * memmap entry.
                 */
                err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
                if (!err)
                        global_flush_tlb();
        }
        return err;
}

/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (last_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
                remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
                return NULL;
        }
        if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
                area->flags &= 0xffffff;
                vunmap(addr);
                return NULL;
        }
        return (__force void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

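/*
 * Illustrative sketch (not part of the original file): because __ioremap
 * page-aligns the physical address internally and adds the sub-page offset
 * back into the returned pointer, callers may pass non-page-aligned
 * addresses directly.  The device address below is made up for illustration.
 *
 *      void __iomem *regs = __ioremap(0xfebc1004UL, 16, _PAGE_PCD);
 *      if (regs) {
 *              u32 val = readl(regs);  // accesses physical 0xfebc1004
 *              iounmap(regs);
 *      }
 */
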
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_PCD);
}
EXPORT_SYMBOL(ioremap_nocache);

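/*
 * Usage sketch (illustration only, not part of the original file): a PCI
 * driver would typically map a BAR with ioremap_nocache(), access registers
 * through the mmio helpers, and release the mapping with iounmap().  The
 * register offset below is hypothetical.
 *
 *      void __iomem *base;
 *
 *      base = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!base)
 *              return -ENOMEM;
 *      writel(0x1, base + 0x10);       // hypothetical enable register
 *      (void) readl(base + 0x10);      // flush the posted PCI write
 *      iounmap(base);
 */
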
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if (addr <= high_memory)
                return;
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk("iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block */
        if (p->flags >> 20)
                ioremap_change_attr(p->phys_addr, p->size, 0);

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);