/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

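/*
 * The remap_area_* helpers below mirror the four-level page-table
 * layout: remap_area_pages() walks PGD entries and allocates PUDs,
 * remap_area_pud() allocates PMDs, remap_area_pmd() allocates page
 * tables, and remap_area_pte() writes the leaf PTEs that point at
 * the physical range being mapped.
 */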
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
			_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static inline int remap_area_pud(pud_t *pud, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts: x86 expects every mapping of a physical page to agree on its
 * cache attributes, so the direct-mapping alias must be changed to match
 * the new ioremap mapping.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages,
					    __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}

/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		/* forget the stashed cache-attr flags so iounmap won't try
		   to reset attributes we never managed to set */
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}

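/*
 * Worked example of the non-page-aligned case described above (the
 * addresses are made up for illustration): __ioremap(0xfebc1234, 8, 0)
 * computes offset = 0x234, rounds the physical base down to 0xfebc1000,
 * maps that one 4K page, and returns the vmalloc-space address of the
 * page plus 0x234.
 */
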
/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses, so in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}

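/*
 * Illustrative sketch (not part of the original file) of how a driver
 * typically consumes the API above; the physical address, size and
 * register offsets are hypothetical. Kept under #if 0 so it is never
 * built.
 */
#if 0
static int example_use_mmio(void)
{
	/* 0xfebc1000/4096 stand in for a device's MMIO BAR */
	void __iomem *regs = ioremap_nocache(0xfebc1000, 4096);

	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x04);			/* hypothetical enable register */
	printk("status %x\n", readl(regs));	/* hypothetical status register */
	iounmap(regs);
	return 0;
}
#endif
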
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
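
/*
 * Once an area is unmapped, a second iounmap() of the same cookie no
 * longer finds it on vmlist and only logs "bad address"; concurrent
 * double unmaps remain the caller's responsibility, per the kerneldoc.
 */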