/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int ioremap_page_range(unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	flush_cache_all();
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr + addr, flags);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
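
/*
 * Usage sketch (an illustration, not part of the original file): the
 * returned cookie already includes any sub-page offset of the physical
 * address, so a caller mapping an unaligned region never sees the
 * page-alignment dance above.  The device address and register layout
 * below are made up:
 *
 *	void __iomem *regs = __ioremap(0xfebc1004, 8, 0);
 *
 *	if (regs) {
 *		unsigned int v = readl(regs);
 *		writel(v | 1, regs + 4);
 *		iounmap(regs);
 *	}
 *
 * Most drivers should go through ioremap()/ioremap_nocache() rather
 * than calling __ioremap() directly.
 */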

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
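
/*
 * Typical use (an illustrative sketch, not from the original file): a
 * PCI driver mapping a BAR whose base and length it got from the PCI
 * core.  pdev is assumed to be a valid struct pci_dev, and the register
 * at offset 0 is hypothetical:
 *
 *	void __iomem *base;
 *
 *	base = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!base)
 *		return -ENOMEM;
 *	writel(1, base);
 *	readl(base);
 *	iounmap(base);
 *
 * The readl() after the writel() is the usual trick to flush a posted
 * PCI write, as the kernel-doc above hints.
 */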

void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	if ((void __force *) addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	write_lock(&vmlist_lock);
	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		goto out_unlock;
	}

	/*
	 * If __ioremap stashed caching flags in the vm_struct (flags << 20)
	 * and this is lowmem, restore the direct mapping to PAGE_KERNEL.
	 */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

out_unlock:
	write_unlock(&vmlist_lock);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		/* Fixmap indices count down; virtual addresses ascend. */
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
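
/*
 * Usage sketch (hypothetical, not part of the original file): early
 * boot code can peek at a firmware table through the boot-time fixmap
 * slots before the normal ioremap() machinery is available.  The names
 * table_phys, table_len and parse_table() are placeholders:
 *
 *	void __init *tbl = bt_ioremap(table_phys, table_len);
 *
 *	if (tbl) {
 *		parse_table(tbl, table_len);
 *		bt_iounmap(tbl, table_len);
 *	}
 */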

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}