#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>		/* for page_address() */
#include <asm/scatterlist.h>
#include <asm/io.h>		/* for virt_to_bus()/bus_to_virt() */

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);
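
/*
 * consistent_alloc() returns an uncached kernel mapping of the buffer and
 * its bus address through *handle; consistent_sync() performs the cache
 * writeback and/or invalidation implied by the DMA direction.
 */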

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, int flag)
{
	/* Machine vectors may provide a board-specific allocator. */
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}
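
/*
 * Typical driver usage of the coherent API, as an illustrative sketch
 * only ("dev" and the buffer size are placeholders):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand "handle" to the device, access "buf" from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */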

static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	/* PCI DMA is coherent here, so no cache maintenance is needed. */
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
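
/*
 * Streaming use, sketched ("dev", "data" and "len" are placeholders).
 * Unmap is a no-op here, so only the map side pays for the cache
 * maintenance:
 *
 *	dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	... device DMAs from "addr" ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */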

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
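
/*
 * Scatterlist use, sketched ("dev", "sg", "nents" and the descriptor
 * helper are placeholders). dma_map_sg() fills in sg[i].dma_address and
 * returns the number of mapped entries:
 *
 *	int i, count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		setup_descriptor(dev, sg[i].dma_address, sg[i].length);
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */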

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

/*
 * On sh the for_cpu and for_device variants perform the same cache
 * maintenance, so both simply call the common sync routine. (An alias
 * attribute cannot be applied to a static inline function.)
 */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}
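
/*
 * A driver touching a streaming buffer between device accesses brackets
 * the CPU access with the _for_cpu/_for_device pair, sketched here with
 * placeholder "dev", "handle" and "len":
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU examines the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */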

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
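
/*
 * A failed mapping shows up as a 0 bus address, so a caller checks it as
 * in this sketch ("dev", "ptr" and "len" are placeholders):
 *
 *	dma_addr_t addr = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(addr))
 *		return -ENOMEM;
 */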

#endif /* __ASM_SH_DMA_MAPPING_H */