#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>

extern struct bus_type pci_bus_type;

/*
 * Any mask is claimed as supported; the platform imposes no addressing
 * restrictions beyond the device's own DMA mask.
 */
#define dma_supported(dev, mask)        (1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
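/*
 * Hypothetical usage sketch (not part of this header): a driver would
 * typically set its mask once at probe time, before creating any
 * mappings.  DMA_32BIT_MASK is assumed from <linux/dma-mapping.h> of
 * this era.
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */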

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction dir);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
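/*
 * Hypothetical usage sketch (not part of this header): allocating a
 * small descriptor ring with a coherent mapping.  'ring_dma' receives
 * the bus address to program into the device.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */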

static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        /* PCI DMA is coherent here; no cache maintenance is needed. */
        if (dev->bus == &pci_bus_type)
                return virt_to_phys(ptr);
#endif
        /* Flush the buffer from the caches before the device sees it. */
        dma_cache_sync(dev, ptr, size, dir);

        return virt_to_phys(ptr);
}
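/*
 * Hypothetical usage sketch (not part of this header): a streaming
 * mapping for a single device-bound buffer.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...let the hardware consume 'addr'...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */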

/* Unmapping is a no-op; any cache maintenance was done at map time. */
#define dma_unmap_single(dev, addr, size, dir)  do { } while (0)

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
        }

        return nents;
}
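/*
 * Hypothetical usage sketch (not part of this header): mapping a
 * two-entry scatterlist for a device-bound transfer.
 *
 *	struct scatterlist sg[2];
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */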

#define dma_unmap_sg(dev, sg, nents, dir)       do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        /* Coherent PCI DMA needs no explicit synchronization. */
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
        }
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                                           dma_addr_t dma_handle, size_t size,
                                           enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t dma_handle,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}
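/*
 * Hypothetical usage sketch (not part of this header): touching a
 * DMA_FROM_DEVICE buffer on the CPU between two device transfers.
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	...inspect the received data...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 */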

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t dma_handle,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t dma_handle,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family defines its own L1_CACHE_SHIFT, and
         * L1_CACHE_BYTES is derived from it, so this is always safe.
         */
        return L1_CACHE_BYTES;
}
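/*
 * Hypothetical usage sketch (not part of this header): padding a
 * buffer size up to the cache line so neighbouring data is never
 * written back or invalidated along with it.
 *
 *	size_t padded = ALIGN(len, dma_get_cache_alignment());
 *	buf = kmalloc(padded, GFP_KERNEL);
 */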

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        /* A bus address of zero marks a failed mapping. */
        return dma_addr == 0;
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);
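/*
 * Hypothetical usage sketch (not part of this header): carving out a
 * chunk of on-chip memory for a device's coherent allocations.  The
 * addresses and the DMA_MEMORY_MAP flag are era-typical placeholders;
 * the call returns zero on failure.
 *
 *	if (!dma_declare_coherent_memory(dev, 0xa0000000, 0x00000000,
 *					 4096, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *	...
 *	dma_release_declared_memory(dev);
 */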

#endif /* __ASM_SH_DMA_MAPPING_H */