/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

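/*
 * The R10000 and R12000 execute aggressively speculatively, so cache
 * lines can be refilled while a DMA transfer is still in flight.  On
 * such CPUs a buffer therefore needs cache maintenance again when
 * ownership returns to the CPU (unmap / sync_for_cpu), which is the
 * case the helper below tests for.
 */
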
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

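/*
 * For a noncoherent device dma_alloc_coherent() below flushes the new
 * buffer out of the cache and returns an uncached (UNCAC_ADDR) alias,
 * so CPU accesses bypass the cache entirely; dma_free_coherent()
 * converts the pointer back with CAC_ADDR before freeing the pages.
 */
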
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

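/*
 * Streaming mappings transfer buffer ownership between CPU and device.
 * On noncoherent hardware __dma_sync() picks the cache operation from
 * the transfer direction: writeback for DMA_TO_DEVICE, invalidate for
 * DMA_FROM_DEVICE and writeback-invalidate for DMA_BIDIRECTIONAL.
 */
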
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

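/*
 * Typical use from a driver, as a sketch only ("dev", "buf" and "len"
 * are illustrative, not taken from this file):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... point the device at "handle" and start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
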
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

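/*
 * In the sync_*_for_cpu variants below only the speculating R10000 /
 * R12000 CPUs need cache maintenance (see cpu_is_noncoherent_r10000()
 * above); the sync_*_for_device variants must run for every
 * noncoherent device because the CPU may have dirtied the lines since
 * the mapping was set up.
 */
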
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);