/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
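
/*
 * ioremap()/iounmap() for MIPS: uncached mappings of the low 512MB are
 * served directly from the fixed KSEG1 window; everything else gets a
 * vmalloc-space range wired up by the remap_area_*() helpers below, which
 * walk and populate the kernel page tables level by level.
 */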
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
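
/*
 * Fill in one pmd's worth of the mapping: allocate a pte page for each
 * pmd entry in the range and hand the per-pmd chunk to remap_area_pte().
 */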
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
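
/*
 * Top level of the page table walk: for each pgd entry covering the
 * range, allocate the intermediate pud/pmd levels and descend, holding
 * init_mm.page_table_lock; caches are flushed before and the TLB after.
 */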
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
/*
 * Allow physical addresses to be fixed up to help 36 bit peripherals.
 */
phys_t __attribute__ ((weak))
fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
	return phys_addr;
}
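
/*
 * A platform whose devices sit above 32 bits of physical address space can
 * provide a strong definition that rewrites such addresses into a window
 * the CPU can reach.  A minimal sketch (the window base and mask are
 * hypothetical, not taken from any real board):
 *
 *	phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
 *	{
 *		if (phys_addr >= 0x100000000ULL)
 *			return BOARD_PCI_WINDOW_BASE +
 *			       (phys_addr & BOARD_PCI_WINDOW_MASK);
 *		return phys_addr;
 *	}
 */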
/*
 * Generic mapping function (not visible outside):
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
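
/*
 * Worked example of the non-page-aligned handling described above,
 * assuming 4kB pages: a request for phys_addr = 0x41000a42, size = 0x10
 * keeps offset = 0xa42, rounds phys_addr down to 0x41000000 and size up
 * to 0x1000, maps that single page, and returns the vmalloc-space base
 * plus 0xa42.
 */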
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
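
	/*
	 * Worked example for the shortcut above: CKSEG1ADDR() simply ORs the
	 * physical address into the uncached KSEG1 window, so phys 0x1fc00000
	 * becomes virtual 0xbfc00000 with no page table or TLB involvement
	 * (and nothing for __iounmap() to tear down later).
	 */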
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

/*
 * Undo __ioremap().  KSEG1 addresses were never page-table mapped, so
 * there is nothing to tear down for them.
 */
void __iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
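
/*
 * Typical use from a driver, as a sketch only (the register address and
 * offset below are made up; on MIPS the generic ioremap()/iounmap()
 * wrappers in <asm/io.h> end up in __ioremap()/__iounmap() above):
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	iounmap(regs);
 */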