/*
 * arch/cris/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
 * window (that is, all memory-mapped I/O devices).
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * CRIS-port by Axis Communications AB
 */
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/arch/memmap.h>
extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, mk_pte_phys(phys_addr, prot));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
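
/*
 * Fill in one pgd entry's worth of the mapping: allocate a pte table
 * for each pmd slot covering [address, address + size) and hand it to
 * remap_area_pte to populate.
 */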
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, prot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}
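
/*
 * Walk [address, address + size) one pgd entry at a time, allocating
 * intermediate pud/pmd tables as needed.  init_mm.page_table_lock
 * serializes the kernel page-table updates, and the TLB is flushed
 * once at the end.
 */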
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                                 unsigned long size, pgprot_t prot)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pmd_t *pmd;

                error = -ENOMEM;
                pud = pud_alloc(&init_mm, dir, address);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, prot))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
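
/*
 * A worked example (illustrative numbers, not from the original source):
 * ioremapping 8 bytes at physical 0x10000104 yields offset = 0x104, with
 * phys_addr rounded down to its page boundary and size rounded up to a
 * whole page; the pointer handed back to the caller is the new mapping's
 * virtual base plus 0x104.
 */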
void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
        void __iomem * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (void __iomem *)area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, prot)) {
                vfree((void __force *)addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
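
/*
 * Default protections for __ioremap: a present, kernel-global mapping,
 * readable and writable; callers can OR extra pgprot bits (such as
 * cacheability) in through `flags'.
 */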
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        return __ioremap_prot(phys_addr, size,
                              __pgprot(_PAGE_PRESENT | __READABLE |
                                       __WRITEABLE | _PAGE_GLOBAL |
                                       _PAGE_KERNEL | flags));
}
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
}
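
/*
 * A minimal usage sketch (MY_DEV_BASE and MY_DEV_SIZE are hypothetical,
 * not defined in this file):
 *
 *        void __iomem *regs = ioremap_nocache(MY_DEV_BASE, MY_DEV_SIZE);
 *        if (!regs)
 *                return -ENOMEM;
 *        writel(0x1, regs);        (poke a device register)
 *        iounmap(regs);
 */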
void iounmap(volatile void __iomem *addr)
{
        /* Only addresses above high_memory come from ioremap's vm area;
         * anything else is a fixed mapping with nothing to free.  Mask
         * off the sub-page offset that __ioremap_prot added back. */
        if ((void __force *)addr > high_memory)
                vfree((void *) (PAGE_MASK & (unsigned long) addr));
}