#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/config.h>
#include <linux/mm.h>		/* page_address(), page_to_phys() */
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>		/* virt_to_bus(), bus_to_virt() */
#include <asm/machvec.h>	/* the sh_mv machine vector */

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
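
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a driver restricting itself to 32-bit DMA addresses would call
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 *
 * Since dma_supported() is hard-wired to 1 above, this can only fail when
 * dev->dma_mask itself is NULL.
 */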

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        /* Boards may provide their own allocator via the machine vector. */
        if (sh_mv.mv_consistent_alloc) {
                void *ret;

                ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);

                return ret;
        }

        return consistent_alloc(flag, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        /* A machine-vector hook that returns 0 has handled the free. */
        if (sh_mv.mv_consistent_free) {
                int ret;

                ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
                if (ret == 0)
                        return;
        }

        consistent_free(vaddr, size);
}
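
/*
 * Illustrative sketch (hypothetical driver code): allocating and freeing a
 * coherent descriptor ring with the two helpers above.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */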

static inline void dma_cache_sync(void *vaddr, size_t size,
                                  enum dma_data_direction dir)
{
        consistent_sync(vaddr, size, (int)dir);
}

static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        /* Coherent PCI needs no cache maintenance. */
        if (dev->bus == &pci_bus_type)
                return virt_to_bus(ptr);
#endif
        dma_cache_sync(ptr, size, dir);

        return virt_to_bus(ptr);
}
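
/*
 * Illustrative sketch (hypothetical driver code): a streaming mapping of a
 * kmalloc'ed buffer for device consumption, checked with dma_mapping_error()
 * (defined at the bottom of this header).
 *
 *	dma_addr_t buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(buf_dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
 */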

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, dir);
#endif
                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        return nents;
}
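
/*
 * Illustrative sketch (hypothetical driver code): mapping a scatterlist.
 * Entries are never coalesced here, so the returned count always equals
 * nents; program_hw_entry() is a made-up stand-in for device setup.
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		program_hw_entry(i, sg_dma_address(&sglist[i]),
 *				 sg_dma_len(&sglist[i]));
 */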

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        /* No-op on coherent PCI. */
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, dir);
#endif
                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }
}

/*
 * The for_cpu/for_device variants are all plain aliases of the
 * direction-agnostic helpers above.
 */
static void dma_sync_single_for_cpu(struct device *dev,
                                    dma_addr_t dma_handle, size_t size,
                                    enum dma_data_direction dir)
        __attribute__ ((alias("dma_sync_single")));

static void dma_sync_single_for_device(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir)
        __attribute__ ((alias("dma_sync_single")));

static void dma_sync_sg_for_cpu(struct device *dev,
                                struct scatterlist *sg, int nelems,
                                enum dma_data_direction dir)
        __attribute__ ((alias("dma_sync_sg")));

static void dma_sync_sg_for_device(struct device *dev,
                                   struct scatterlist *sg, int nelems,
                                   enum dma_data_direction dir)
        __attribute__ ((alias("dma_sync_sg")));
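
/*
 * Illustrative sketch (hypothetical driver code): handing a streaming buffer
 * back and forth between device DMA and CPU access.
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... the CPU inspects the freshly DMA'd data ...
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *
 * Both calls resolve to dma_sync_single() via the aliases above.
 */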

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
}
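
/*
 * Illustrative sketch: padding a hypothetical DMA buffer length so that it
 * never shares a cache line with unrelated data (ALIGN() is the generic
 * helper from linux/kernel.h).
 *
 *	size_t padded = ALIGN(len, dma_get_cache_alignment());
 */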

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */