/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END  (0xffe00000)
#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
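
/*
 * Worked example (illustrative only): on a platform where
 * CONSISTENT_DMA_SIZE is SZ_2M, with PAGE_SHIFT == 12 and the ARM
 * PGDIR_SHIFT == 21:
 *
 *	CONSISTENT_BASE                  = 0xffe00000 - 0x200000 = 0xffc00000
 *	NUM_CONSISTENT_PTES              = 0x200000 >> 21 = 1
 *	CONSISTENT_OFFSET(0xffc01000)    = 0x1000 >> 12   = 1 (second page)
 *	CONSISTENT_PTE_INDEX(0xffc01000) = 0x1000 >> 21   = 0 (first pte table)
 */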

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent
 * allocations.
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc.).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region  region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (e.g.):
 *
 *  struct vm_region vmalloc_head = {
 *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start       = VMALLOC_START,
 *      .vm_end         = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it depends on the
 * amount of RAM found at boot time), so I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
        struct page             *vm_pages;
        int                     vm_active;
};

static struct vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct vm_region *c, *new;

        new = kmalloc(sizeof(struct vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;
        new->vm_active = 1;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}
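
/*
 * Worked example (illustrative only): if the list already holds regions
 * [BASE, BASE+0x1000) and [BASE+0x3000, BASE+0x4000), a request for size
 * 0x2000 does not fit before the first region, so addr advances to its
 * vm_end (BASE+0x1000); it does fit before the second region's vm_start,
 * so the new entry is inserted there as [BASE+0x1000, BASE+0x3000).
 */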

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
        struct vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_active && c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        struct page *page;
        struct vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;

        if (!consistent_pte[0]) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        goto no_page;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        goto no_page;
                }
        }

        /*
         * Sanity check the allocation size.
         */
        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) ||
            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big "
                       "(requested %#zx mask %#llx)\n", size, mask);
                goto no_page;
        }

        order = get_order(size);

        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                dmac_flush_range(kaddr, kaddr + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte;
                struct page *end = page + (1 << order);
                int idx = CONSISTENT_PTE_INDEX(c->vm_start);
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

                pte = consistent_pte[idx] + off;
                c->vm_pages = page;

                split_page(page, order);

                /*
                 * Set the "dma handle".
                 */
                *handle = page_to_dma(dev, page);

                do {
                        BUG_ON(!pte_none(*pte));

                        /*
                         * x86 does not mark the pages reserved...
                         */
                        SetPageReserved(page);
                        set_pte_ext(pte, mk_pte(page, prot), 0);
                        page++;
                        pte++;
                        off++;
                        if (off >= PTRS_PER_PTE) {
                                off = 0;
                                pte = consistent_pte[++idx];
                        }
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        *handle = ~0;
        return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        if (arch_is_coherent()) {
                void *virt;

                virt = kmalloc(size, gfp);
                if (!virt)
                        return NULL;
                *handle = virt_to_dma(dev, virt);

                return virt;
        }

        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
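
/*
 * Example usage (illustrative sketch, not part of this file): a
 * hypothetical driver allocating a page-sized coherent descriptor ring.
 * "pdev" is an assumed platform/PCI device:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *
 *	... hand ring_dma to the device, access ring from the CPU ...
 *
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */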

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        unsigned long flags, user_size, kern_size;
        struct vm_region *c;
        int ret = -ENXIO;

        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        spin_lock_irqsave(&consistent_lock, flags);
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        spin_unlock_irqrestore(&consistent_lock, flags);

        if (c) {
                unsigned long off = vma->vm_pgoff;

                kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

                if (off < kern_size &&
                    user_size <= (kern_size - off)) {
                        vma->vm_flags |= VM_RESERVED;
                        ret = remap_pfn_range(vma, vma->vm_start,
                                              page_to_pfn(c->vm_pages) + off,
                                              user_size << PAGE_SHIFT,
                                              vma->vm_page_prot);
                }
        }

        return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
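
/*
 * Example usage (illustrative sketch, not part of this file): a
 * hypothetical frame-buffer-style driver mapping a buffer previously
 * obtained with dma_alloc_writecombine() into userspace from its mmap
 * file operation.  "struct foo_dev" and its fields are assumptions:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_writecombine(foo->dev, vma, foo->buf_cpu,
 *					     foo->buf_dma, foo->buf_size);
 *	}
 */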

/*
 * Free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        struct vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;
        int idx;
        u32 off;

        WARN_ON(irqs_disabled());

        if (arch_is_coherent()) {
                kfree(cpu_addr);
                return;
        }

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;

        c->vm_active = 0;
        spin_unlock_irqrestore(&consistent_lock, flags);

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        idx = CONSISTENT_PTE_INDEX(c->vm_start);
        off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
        ptep = consistent_pte[idx] + off;
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
                unsigned long pfn;

                ptep++;
                addr += PAGE_SIZE;
                off++;
                if (off >= PTRS_PER_PTE) {
                        off = 0;
                        ptep = consistent_pte[++idx];
                }

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                /*
                                 * x86 does not mark the pages reserved...
                                 */
                                ClearPageReserved(page);

                                __free_page(page);
                                continue;
                        }
                }

                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        spin_lock_irqsave(&consistent_lock, flags);
        list_del(&c->vm_list);
        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, cpu_addr);
        dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int ret = 0, i = 0;
        u32 base = CONSISTENT_BASE;

        do {
                pgd = pgd_offset(&init_mm, base);
                pmd = pmd_alloc(&init_mm, pgd, base);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte[i++] = pte;
                base += (1 << PGDIR_SHIFT);
        } while (base < CONSISTENT_END);

        return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support instead - see dma-mapping.h (dma_sync_*).
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                dmac_inv_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                dmac_clean_range(start, end);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                dmac_flush_range(start, end);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(consistent_sync);
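
/*
 * Example (illustrative sketch, not part of this file): what a driver
 * should do instead of calling consistent_sync() directly - use the
 * dma_sync_* helpers on a streaming mapping, which stay correct under
 * CONFIG_DMABOUNCE.  "dev", "buf" and "len" are hypothetical:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	... let the device write into the buffer ...
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the data ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */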