/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);
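
/*
 * Hedged usage sketch (not part of the original file): how a driver might
 * sync a cacheable buffer around a transfer.  foo_buf, len and
 * foo_start_tx() are made-up names used purely for illustration.
 *
 *	// CPU has filled foo_buf; write dirty lines back before the
 *	// device reads the data
 *	dma_cache_sync(dev, foo_buf, len, DMA_TO_DEVICE);
 *	foo_start_tx(dev);
 *
 *	// Device has written into foo_buf; discard stale lines before
 *	// the CPU reads the data
 *	dma_cache_sync(dev, foo_buf, len, DMA_FROM_DEVICE);
 */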
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end)
		__free_page(free++);

	return page;
}
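
/*
 * Worked example of the allocation above (illustrative only, assuming a
 * 4 KiB PAGE_SIZE): a request for 5 * PAGE_SIZE bytes stays 5 pages after
 * PAGE_ALIGN(), get_order() rounds it up to order 3 (8 pages),
 * split_page() turns the order-3 block into 8 independent order-0 pages,
 * and the loop above returns the 3 unused tail pages to the page
 * allocator, so exactly the 5 requested pages remain allocated.
 */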
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);

	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
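
/*
 * Hedged usage sketch (not part of the original file): allocating a DMA
 * descriptor ring with the coherent API.  struct foo_ring, FOO_RING_BYTES
 * and ring_dma are made-up names used purely for illustration.
 *
 *	dma_addr_t ring_dma;
 *	struct foo_ring *ring;
 *
 *	ring = dma_alloc_coherent(dev, FOO_RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ring is an uncached (P2) virtual address for the CPU;
 *	// ring_dma is the bus address to program into the device
 *	...
 *	dma_free_coherent(dev, FOO_RING_BYTES, ring, ring_dma);
 */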
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
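
/*
 * Hedged usage sketch (not part of the original file): a frame buffer
 * driver might prefer a write-combining mapping so CPU stores to display
 * memory can be buffered.  FOO_FB_BYTES and fb_dma are made-up names used
 * purely for illustration.
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *
 *	fb = dma_alloc_writecombine(dev, FOO_FB_BYTES, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	// fb_dma goes to the LCD controller; fb is the CPU's P3 mapping
 *	...
 *	dma_free_writecombine(dev, FOO_FB_BYTES, fb, fb_dma);
 */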