/*
 * This is based on both include/asm-sh/dma-mapping.h and
 * include/asm-ppc/pci.h
 */
#ifndef __ASM_PPC_DMA_MAPPING_H
#define __ASM_PPC_DMA_MAPPING_H
#include <linux/config.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>	/* virt_to_bus(), bus_to_virt(), page_to_bus() */

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
#define dma_cache_inv(_start,_size) \
	invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
	clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
	flush_dcache_range(_start, (_start + _size))

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
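
/*
 * Illustrative sketch, not part of this header: the comment above notes
 * that a driver on a non-snooping core could instead allocate memory
 * "normally" and keep it consistent by hand with the cache management
 * macros.  Assuming a hypothetical buffer "buf" of "len" bytes that the
 * device first reads and later overwrites:
 *
 *	dma_cache_wback((unsigned long)buf, len);
 *	... device reads the buffer via DMA ...
 *	dma_cache_inv((unsigned long)buf, len);
 *	... CPU may now read what the device wrote ...
 *
 * In practice the dma_map_*() and dma_sync_*() helpers below take care of
 * this, so this style is rarely needed directly.
 */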

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
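
/*
 * Example, illustrative only: a driver would normally negotiate its DMA
 * mask once at probe time and bail out if it cannot be satisfied.  The
 * "dev" pointer is whatever struct device the bus layer hands the driver;
 * nothing below is defined by this header.
 *
 *	if (dma_set_mask(dev, 0xffffffffULL)) {
 *		dev_warn(dev, "no suitable DMA mask available\n");
 *		return -EIO;
 *	}
 *
 * With the dma_supported() stub above this always succeeds, but portable
 * drivers should still check the return value.
 */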

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
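
/*
 * Example, illustrative only: a typical use is a small descriptor ring
 * shared with the device.  "ring", "ring_dma" and RING_BYTES are
 * hypothetical names, not part of this API.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access "ring" from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */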

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	/* Flush/invalidate the buffer so the device sees current data. */
	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
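
/*
 * Example, illustrative only: streaming mapping of a kernel buffer for one
 * transfer.  "buf" and "len" are hypothetical; the buffer must not be
 * touched by the CPU between dma_map_single() and the matching unmap/sync.
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand busaddr to the device and start the transfer ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */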

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
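
/*
 * Example, illustrative only: mapping a scatterlist and programming one
 * (hypothetical) hardware descriptor per entry.  dma_map_sg() returns the
 * number of entries to use; here it is always "nents" since entries are
 * never merged.
 *
 *	int i, count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 *	for (i = 0; i < count; i++, sg++) {
 *		desc[i].addr = sg->dma_address;
 *		desc[i].len  = sg->length;
 *	}
 */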

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
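
/*
 * Example, illustrative only: a long-lived receive buffer whose ownership
 * alternates between the device and the CPU.  Sync "for_cpu" before the
 * CPU reads what the device wrote, and "for_device" before giving the
 * buffer back.  "busaddr" and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */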

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}
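
/*
 * Example, illustrative only: using the reported alignment to keep a
 * driver-private DMA buffer from sharing a cache line with unrelated
 * fields, which would otherwise be corrupted by invalidation on
 * non-coherent parts.  The structure and its field names are hypothetical.
 *
 *	struct foo_priv {
 *		u8 rx_buf[FOO_BUF_SIZE] ____cacheline_aligned;
 *		spinlock_t lock;
 *	};
 */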

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction direction)
{
	__dma_sync(vaddr, size, (int)direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mappings can never fail here, so no address is an error value. */
	return 0;
}

#endif /* __ASM_PPC_DMA_MAPPING_H */