#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, void *ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, char *ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
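
/*
 * Illustrative sketch (not part of this interface): a DMA backend fills in
 * a struct dma_mapping_ops and points dma_ops at it during boot.  All names
 * below are hypothetical; see the real backends (e.g. pci-nommu.c and
 * pci-gart.c under arch/x86_64/kernel) for actual registrations.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.dma_supported	= example_dma_supported,
 *		.is_phys	= 1,
 *	};
 *
 *	void __init example_dma_init(void)
 *	{
 *		dma_ops = &example_dma_ops;
 *	}
 */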

extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops* dma_ops;
extern int iommu_merge;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);
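
/*
 * Illustrative sketch: allocating a coherent region, e.g. a descriptor
 * ring that both CPU and device access without explicit syncs.  The
 * device pointer and size here are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */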

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_single(dev, addr, size, direction);
}
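
/*
 * Illustrative sketch: a streaming mapping for a single transmit buffer,
 * with the usual dma_mapping_error() check (which in this interface takes
 * only the dma_addr_t).  dev, buf and len are hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -EIO;
 *	... hand dma to the device, wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */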

#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}
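
/*
 * Illustrative sketch: letting the CPU inspect a streaming buffer between
 * device accesses without unmapping it.  dev, dma and len are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... ownership is back with the device ...
 */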

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}
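
/*
 * Illustrative sketch: the _range variants sync only part of a mapping,
 * e.g. just a packet header inside a larger receive slot.  The offset and
 * length names are hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, dma, hdr_off, hdr_len,
 *				      DMA_FROM_DEVICE);
 *	... examine the header ...
 *	dma_sync_single_range_for_device(dev, dma, hdr_off, hdr_len,
 *					 DMA_FROM_DEVICE);
 */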

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
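
/*
 * Illustrative sketch: mapping a scatterlist.  map_sg may merge entries
 * (see iommu_merge), so the driver must walk the returned count, not the
 * original nents; unmap still takes the original nents.  setup_desc is a
 * hypothetical driver helper.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for (i = 0; i < count; i++)
 *		setup_desc(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */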

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);
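
/*
 * Illustrative sketch: a driver for a device limited to 32-bit addressing
 * would restrict its mask before mapping anything, e.g. with DMA_32BIT_MASK
 * from linux/dma-mapping.h:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */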

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */