/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
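
/*
 * The plat_*() helpers used throughout this file (plat_device_is_coherent,
 * plat_map_dma_mem, plat_dma_addr_to_phys, plat_unmap_dma_mem, ...) are
 * supplied by the platform's <dma-coherence.h> and describe whether DMA
 * from a given device is cache-coherent and how kernel addresses map to
 * bus addresses on this board.
 */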

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_data.cputype == CPU_R10000 ||
		current_cpu_data.cputype == CPU_R12000);
}
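
/*
 * dma_alloc_noncoherent() below returns ordinary cached memory; callers
 * that use it with a non-coherent device are expected to keep the CPU
 * and device views in sync themselves, e.g. via dma_cache_sync().
 */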

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
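
/*
 * dma_alloc_coherent() returns memory that the CPU and the device can
 * share without explicit syncs: when the device is not cache-coherent
 * the pages are written back and invalidated and the returned pointer
 * is redirected through an uncached address window via UNCAC_ADDR().
 */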

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
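
/*
 * Illustrative driver usage of the coherent allocator (a sketch only -
 * "mydev" and the PAGE_SIZE buffer are made up for the example):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&mydev->dev, PAGE_SIZE, &ring_dma,
 *				  GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(&mydev->dev, PAGE_SIZE, ring, ring_dma);
 */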

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
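
/*
 * __dma_sync() picks the cache operation that matches the transfer
 * direction: write back dirty lines before the device reads
 * (DMA_TO_DEVICE), invalidate before the CPU reads what the device
 * wrote (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL).
 */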

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
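
/*
 * Streaming mappings: dma_map_single() does the direction-appropriate
 * cache maintenance for non-coherent devices and translates the kernel
 * pointer to a bus address; dma_unmap_single() drops the mapping and,
 * on non-coherent R10000/R12000 systems, syncs once more before the
 * buffer is handed back to the CPU.
 */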

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
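
/*
 * Scatter-gather mapping: each entry is synced individually when the
 * device is not coherent and its bus address is recorded in
 * sg->dma_address for the caller.
 */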

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)(addr + sg->offset),
						   sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);
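
/*
 * When unmapping a scatterlist, entries the device may have written to
 * (any direction other than DMA_TO_DEVICE) are synced so the CPU does
 * not read stale cache lines.
 */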

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) page_address(sg->page);
			if (addr)
				__dma_sync(addr + sg->offset, sg->length,
					   direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
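
/*
 * The dma_sync_*_for_cpu() variants hand ownership of a mapped buffer
 * back to the CPU after the device is done with it; the *_for_device()
 * variants return ownership to the device before the next transfer.
 * The *_range_* forms operate on a sub-range of an existing mapping.
 */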

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg->page),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg->page),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);