/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ip32/crime.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

/*
 * A few notes:
 * 1. CPU sees memory as two chunks: 0-256M @ 0x0, and the rest @ 0x40000000+256M
 * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000 for native-endian)
 * 3. All other devices see memory as one big chunk at 0x40000000
 * 4. Non-PCI devices will pass NULL as struct device*
 * Thus we translate differently, depending on the device; a compiled-out
 * sketch of the translation follows the RAM_OFFSET_MASK definition below.
 */

#define RAM_OFFSET_MASK	0x3fffffff

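/*
 * A minimal, compiled-out sketch of the physical-to-bus translation that
 * the mapping functions below open-code.  The helper name is hypothetical;
 * nothing in this file calls it.
 */
#if 0
static dma_addr_t example_phys_to_dma(struct device *dev, unsigned long phys)
{
	/* fold both CPU chunks (0-256M @ 0x0, rest @ 0x40000000+256M) into one */
	unsigned long dma = phys & RAM_OFFSET_MASK;

	/* non-PCI devices (NULL dev) reach memory through the 0x40000000 window */
	if (dev == NULL)
		dma += CRIME_HI_MEM_BASE;

	return (dma_addr_t)dma;
}
#endif
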
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* fall back to GFP_DMA for devices that cannot address all of RAM */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		unsigned long addr = virt_to_phys(ret) & RAM_OFFSET_MASK;

		memset(ret, 0, size);
		if (dev == NULL)
			addr += CRIME_HI_MEM_BASE;
		*dma_handle = addr;
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		/* flush the cached alias, then hand out an uncached address */
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

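/*
 * Compiled-out usage sketch: how a driver might use the allocators above
 * to obtain a coherent (uncached) buffer.  'example_dev' and the buffer
 * size are assumptions for illustration only.
 */
#if 0
static void example_coherent_usage(struct device *example_dev)
{
	dma_addr_t handle;
	void *cpu;

	/* the buffer comes back zeroed and mapped uncached; no syncs needed */
	cpu = dma_alloc_coherent(example_dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (cpu) {
		/* ... hand 'handle' to the device, access data through 'cpu' ... */
		dma_free_coherent(example_dev, PAGE_SIZE, cpu, handle);
	}
}
#endif
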
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	/* convert the uncached address back to its cached alias */
	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;
	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	/* write back and/or invalidate, depending on the transfer direction */
	__dma_sync(addr, size, direction);

	addr = virt_to_phys(ptr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}

EXPORT_SYMBOL(dma_map_single);

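/*
 * Compiled-out usage sketch for the streaming map/unmap pair above; the
 * device, buffer and length are hypothetical.
 */
#if 0
static void example_streaming_tx(struct device *example_dev, void *buf,
	size_t len)
{
	dma_addr_t handle;

	/* writes back the CPU cache, then returns a bus address for 'buf' */
	handle = dma_map_single(example_dev, buf, len, DMA_TO_DEVICE);
	/* ... start the device DMA from 'handle', wait for completion ... */
	dma_unmap_single(example_dev, handle, len, DMA_TO_DEVICE);
}
#endif
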
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	/* nothing to do for any valid direction here; just reject DMA_NONE */
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page) + sg->offset;
		if (addr)
			__dma_sync(addr, sg->length, direction);
		/* translate to a bus address, as in dma_map_single() */
		addr = __pa(addr) & RAM_OFFSET_MASK;
		if (dev == NULL)
			addr += CRIME_HI_MEM_BASE;
		sg->dma_address = (dma_addr_t)addr;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

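/*
 * Compiled-out sketch of a scatter/gather receive using dma_map_sg()
 * above; the device and scatterlist are hypothetical.
 */
#if 0
static void example_sg_rx(struct device *example_dev,
	struct scatterlist *sg, int nents)
{
	int i, mapped;

	/* invalidates the CPU cache and fills in each sg[i].dma_address */
	mapped = dma_map_sg(example_dev, sg, nents, DMA_FROM_DEVICE);
	for (i = 0; i < mapped; i++) {
		/* ... program one device descriptor per mapped entry ... */
		(void) sg[i].dma_address;
		(void) sg[i].length;
	}
	dma_unmap_sg(example_dev, sg, nents, DMA_FROM_DEVICE);
}
#endif
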
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);
	addr = __pa(addr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		/* undo the device view to recover a CPU address for the flush */
		dma_address &= RAM_OFFSET_MASK;
		addr = dma_address + PAGE_OFFSET;
		if (dma_address >= 256*1024*1024)
			addr += CRIME_HI_MEM_BASE;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			dma_cache_wback_inv(addr + sg->offset, sg->length);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	/* map the bus address back to a CPU address, as in dma_unmap_page() */
	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

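/*
 * Compiled-out sketch of passing one buffer back and forth between CPU
 * and device with the sync helpers above; all names are hypothetical.
 */
#if 0
static void example_bidir_usage(struct device *example_dev, void *buf,
	size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(example_dev, buf, len, DMA_BIDIRECTIONAL);
	/* ... device writes into the buffer ... */
	dma_sync_single_for_cpu(example_dev, handle, len, DMA_BIDIRECTIONAL);
	/* the CPU may now read 'buf'; sync again before the device reuses it */
	dma_sync_single_for_device(example_dev, handle, len, DMA_BIDIRECTIONAL);
	dma_unmap_single(example_dev, handle, len, DMA_BIDIRECTIONAL);
}
#endif
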
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + offset + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + offset + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			   sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			   sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	/* mappings in this file cannot fail */
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

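/*
 * Compiled-out sketch: a driver would typically negotiate its DMA mask at
 * probe time, which lands in dma_supported() above.  'example_dev' is
 * hypothetical.
 */
#if 0
static int example_negotiate_mask(struct device *example_dev)
{
	/* masks narrower than 24 bits are rejected by dma_supported() */
	return dma_set_mask(example_dev, 0xffffffffULL);
}
#endif
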
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	/* coherent allocations are handed out as uncached addresses */
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long) vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);