include/asm-x86_64/dma-mapping.h
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
        int             (*mapping_error)(dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};

extern dma_addr_t bad_dma_address;
extern const struct dma_mapping_ops *dma_ops;
extern int iommu_merge;

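/*
 * Illustrative sketch (not part of this header): a minimal backend fills in
 * a dma_mapping_ops table and points dma_ops at it during early DMA setup.
 * The names example_map_single and example_dma_ops are hypothetical; real
 * implementations live in arch/x86_64/kernel/pci-nommu.c and friends.
 *
 *	static dma_addr_t example_map_single(struct device *hwdev, void *ptr,
 *					     size_t size, int direction)
 *	{
 *		return virt_to_bus(ptr);	// no IOMMU translation
 *	}
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.is_phys	= 1,
 *	};
 *
 *	// during early PCI/DMA initialization:
 *	dma_ops = &example_dma_ops;
 */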
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);

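/*
 * Illustrative sketch (not part of this header): allocating and freeing a
 * consistent (coherent) buffer for a device.  "dev" and the 4 KiB size are
 * assumptions made for the example.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... hand ring_dma to the device, access ring from the CPU ...
 *	dma_free_coherent(dev, 4096, ring, ring_dma);
 */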
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_single(dev, addr, size, direction);
}

#define dma_map_page(dev,page,offset,size,dir) \
        dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

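/*
 * Illustrative sketch (not part of this header): a streaming mapping of a
 * single buffer, checked with dma_mapping_error() and torn down with
 * dma_unmap_single().  "dev", "buf" and "len" are assumptions.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -EIO;
 *	// ... tell the device to read len bytes starting at dma ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */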
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

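/*
 * Illustrative sketch (not part of this header): letting the CPU look at a
 * buffer that remains mapped for the device, then handing it back.  The
 * mapping "dma"/"len" and the direction are assumed to come from an earlier
 * dma_map_single() call.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... CPU may now read the data the device wrote ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... device may DMA into the buffer again ...
 */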
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
                                                   offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

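/*
 * Illustrative sketch (not part of this header): mapping a scatterlist the
 * caller has already filled in.  dma_map_sg() may coalesce entries, so the
 * returned count (not the original "nents") bounds the loop; unmapping must
 * still use the original nents.  setup_device_segment() is a hypothetical
 * driver helper, everything else is the API declared here and in
 * asm/scatterlist.h.
 *
 *	int i, mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	for (i = 0; i < mapped; i++)
 *		setup_device_segment(sg_dma_address(&sglist[i]),
 *				     sg_dma_len(&sglist[i]));
 *	// ... after the transfer completes ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */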
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

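/*
 * Illustrative sketch (not part of this header): a driver negotiating its
 * addressing capability at probe time.  "pdev" is an assumed pci_dev and
 * the 32-bit limit is only an example; DMA_32BIT_MASK comes from
 * linux/dma-mapping.h.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;	// device cannot be addressed within this mask
 */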
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */