/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
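
/*
 * Typical driver usage, sketched for illustration only (the physical
 * base address, size and register offsets below are made up):
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *
 *	if (regs) {
 *		writel(1, regs + 0x4);
 *		status = readl(regs);
 *		iounmap(regs);
 *	}
 *
 * The returned cookie must only be used with the I/O accessors; it is
 * never dereferenced directly.
 */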
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, pgprot_t pgprot)
{
	unsigned long end;

	/* Clamp the range to the part of the PMD this PTE table covers. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return;

 bad:
	printk("remap_area_pte: page already exists\n");
	BUG();
}

static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	pgprot_t pgprot;

	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;
	BUG_ON(address >= end);

	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int
remap_area_pages(unsigned long start, unsigned long pfn,
		 unsigned long size, unsigned long flags)
{
	unsigned long address = start;
	unsigned long end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	int err = 0;
	pgd_t * dir;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	BUG_ON(address >= end);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	return err;
}
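
/*
 * Illustrative walk, not from the original file (addresses made up,
 * 4K pages assumed): each ARM pgd slot spans PGDIR_SIZE, i.e. 2MB
 * covering two 1MB hardware sections.  Remapping 16K of device
 * registers at phys 0x48000000 therefore touches a single pgd slot
 * (assuming the range does not cross a 2MB boundary): pmd_alloc()
 * returns that slot, pte_alloc_kernel() hangs a PTE table off it,
 * and remap_area_pte() fills four consecutive PTEs:
 *
 *	pte[0] -> 0x48000000   pte[1] -> 0x48001000
 *	pte[2] -> 0x48002000   pte[3] -> 0x48003000
 */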

/*
 * Bring the vmalloc/ioremap pgd entries of this mm back in sync with
 * init_mm.  The copy is retried until the sequence number read before
 * the copy still matches init_mm afterwards, i.e. no further kernel
 * mapping change raced with us.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}
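
/*
 * The expected caller pattern, mirrored from unmap_area_sections()
 * below: test the sequence numbers first, so the pgd copy is only
 * paid for when the kernel mapping has actually changed.
 *
 *	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
 *		__check_kvm_seq(current->active_mm);
 */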

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a 4K guard page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_kernel(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
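
/*
 * Worked example for the loop above (illustrative pfn, 4K pages
 * assumed): mapping 4MB at pfn 0x48000 (phys 0x48000000) takes two
 * iterations, each writing a pair of 1MB section entries:
 *
 *	iter 0: pmd[0] = 0x48000000 | prot, pmd[1] = 0x48100000 | prot
 *	iter 1: pmd[0] = 0x48200000 | prot, pmd[1] = 0x48300000 | prot
 *
 * pfn advances by SZ_1M >> PAGE_SHIFT (0x100) per section, and addr by
 * PGDIR_SIZE (2MB) per iteration.
 */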

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/*
		 * A 16MB supersection is represented by 16 identical
		 * first-level entries: 8 pgd slots of 2 pmds each.
		 */
		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
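
/*
 * Worked example of the supersection encoding above (illustrative
 * values, 4K pages assumed): for pfn 0x140000, i.e. the 36-bit
 * physical address 0x140000000,
 *
 *	__pfn_to_phys(pfn) keeps the low 32 bits:        0x40000000
 *	(pfn >> (32 - PAGE_SHIFT)) & 0xf = 0x1, << 20:   0x00100000
 *
 * so physical address bits [35:32] end up in descriptor bits [23:20],
 * where the ARM supersection format expects the extended base address.
 */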

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct * area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, flags);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);

void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
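
/*
 * Worked example of the alignment handling above (illustrative values,
 * 4K pages assumed): __ioremap(0x40001234, 0x100, 0) computes
 *
 *	offset    = 0x40001234 & ~PAGE_MASK             = 0x234
 *	pfn       = 0x40001234 >> PAGE_SHIFT            = 0x40001
 *	last_addr = 0x40001234 + 0x100 - 1              = 0x40001333
 *	size      = PAGE_ALIGN(0x40001334) - 0x40001234 = 0xdcc
 *
 * __ioremap_pfn() then maps from the page boundary 0x40001000 and
 * returns its virtual address plus the 0x234 offset, so the caller
 * sees a pointer to exactly 0x40001234's contents.
 */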

void __iounmap(void __iomem *addr)
{
	struct vm_struct **p, *tmp;
	unsigned int section_mapping = 0;

	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);