/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
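
/*
 * Illustrative sketch (not part of the original file): a driver maps
 * its registers, touches them only through the MMIO accessors, and
 * tears the mapping down again.  EXAMPLE_PHYS and EXAMPLE_SIZE are
 * made-up placeholders for a real device's resource:
 *
 *	void __iomem *regs = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);
 *	if (regs) {
 *		u32 id = readl(regs);
 *		writel(id | 1, regs + 0x4);
 *		iounmap(regs);
 *	}
 *
 * Dereferencing "regs" directly instead of going through readl/writel
 * would defeat the __iomem annotation and is exactly what the comment
 * above warns against.
 */
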
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, pgprot_t pgprot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}
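
/*
 * Worked example (assuming the usual 4K pages and 2MB PMDs, i.e.
 * PMD_SIZE = 0x200000): for a mapping starting at offset 0x1ff000
 * within a PMD with size 0x3000, end is clamped from 0x202000 down to
 * PMD_SIZE, so only the page at 0x1ff000 is set here; remap_area_pmd()
 * then moves on to the next PMD for the remaining 0x2000.
 */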

static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	pgprot_t pgprot;

	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;
	BUG_ON(address >= end);

	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int
remap_area_pages(unsigned long start, unsigned long pfn,
		 unsigned long size, unsigned long flags)
{
	unsigned long address = start;
	unsigned long end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	int err = 0;
	pgd_t *dir;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	BUG_ON(address >= end);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	return err;
}
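
/*
 * Illustrative sketch (not in the original file): mapping two pages of
 * a hypothetical device at physical 0x50000000 through the walk above
 * would look like
 *
 *	err = remap_area_pages(addr, __phys_to_pfn(0x50000000),
 *			       2 * PAGE_SIZE, 0);
 *
 * where "addr" is kernel virtual space obtained from get_vm_area(), as
 * __ioremap_pfn() below does for real.
 */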
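
/*
 * Lazily bring a task's page tables up to date with init_mm for the
 * vmalloc/ioremap region: copy the kernel pgd entries and retry if
 * init_mm.context.kvm_seq moved while we were copying (i.e. the kernel
 * mappings changed again underneath us).
 */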
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a 4K guard page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
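
/*
 * Worked example: for a 1MB ioremap, get_vm_area() hands back an area
 * of 0x101000 bytes (the request plus the 4K guard page).  Masking the
 * size with ~(SZ_1M - 1) recovers the 0x100000 of real section
 * mappings; iterating over the raw size would walk into the guard
 * page.
 */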
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
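
/*
 * Worked example for the loop above: mapping 2MB of device memory at
 * physical 0x50000000 takes a single iteration - pmd[0] becomes a 1MB
 * section descriptor for 0x50000000-0x500fffff and pmd[1] one for
 * 0x50100000-0x501fffff - so no second-level page table is needed.
 */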

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
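
/*
 * Worked example of the supersection encoding above (4K pages, so
 * PAGE_SHIFT = 12): a 36-bit physical address 0x2_4000_0000 has
 * pfn = 0x240000.  __pfn_to_phys(pfn) supplies base address bits
 * [31:24] of the descriptor, while (pfn >> 20) & 0xf extracts physical
 * address bits [35:32] (here 0x2), which the hardware expects at
 * descriptor bits [23:20].  All sixteen 1MB entries of the 16MB region
 * are written with this same descriptor.
 */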
#endif


/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, flags);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);
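
/*
 * Illustrative sketch (not in the original file): __ioremap_pfn() is
 * the entry point for callers that already hold a page frame number,
 * e.g. mapping one uncached page at the hypothetical pfn 0x50000:
 *
 *	void __iomem *io = __ioremap_pfn(0x50000, 0, PAGE_SIZE, 0);
 *
 * Most callers want __ioremap()/ioremap() below, which take a physical
 * address and derive the pfn and the sub-page offset themselves.
 */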

void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
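
/*
 * Worked example of the alignment handling above: __ioremap(0x40001234,
 * 0x100, 0) gives offset = 0x234 and pfn = 0x40001.  last_addr is
 * 0x40001333, so the size passed down becomes PAGE_ALIGN(0x40001334) -
 * 0x40001234 = 0xdcc, which still covers the whole request, and the
 * cookie returned by __ioremap_pfn() points 0x234 bytes into the
 * mapping - exactly at physical 0x40001234.
 */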

void __iounmap(void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);