#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>		/* flush_write_buffers() */
#include <asm/swiotlb.h>

/*
 * One dma_mapping_ops table exists per backend (nommu, swiotlb, gart);
 * dma_ops below points at the active one.
 */
struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void*		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, void *ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, char *ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
};

/* sentinel returned by the mapping functions to signal failure */
extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops *dma_ops;
extern int iommu_merge;
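
/*
 * Example sketch: a backend fills in a dma_mapping_ops table and is
 * selected at boot by pointing dma_ops at it. The swiotlb_* names here
 * stand in for whatever hooks the backend actually provides.
 *
 *	static struct dma_mapping_ops swiotlb_dma_ops = {
 *		.mapping_error	= swiotlb_dma_mapping_error,
 *		.map_single	= swiotlb_map_single,
 *		.unmap_single	= swiotlb_unmap_single,
 *		.map_sg		= swiotlb_map_sg,
 *		.unmap_sg	= swiotlb_unmap_sg,
 *	};
 *
 *	dma_ops = &swiotlb_dma_ops;
 */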

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);
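
/*
 * Example sketch: allocating a coherent (consistent) region, e.g. for a
 * descriptor ring. RING_BYTES is a made-up size for illustration.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */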

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_single(dev, addr, size, direction);
}
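
/*
 * Example sketch of the streaming API: map a driver buffer for a
 * device-to-memory transfer, check for failure, and unmap once the
 * transfer has completed. buf/len are illustrative.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	... start the device, wait for completion ...
 *	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
 */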

#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single
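
/*
 * Example sketch: mapping part of a page. On x86-64 every page has a
 * kernel direct mapping, so this reduces to dma_map_single() on
 * page_address(). page/off/len are illustrative.
 *
 *	dma_addr_t bus = dma_map_page(dev, page, off, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
 */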

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}
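
/*
 * Example sketch: a mapping that is reused across transfers must be
 * handed back and forth between CPU and device. Touching the buffer is
 * only safe after *_for_cpu; ownership returns to the device with
 * *_for_device. inspect_buffer() is a hypothetical helper.
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	inspect_buffer(buf, len);
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */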

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
						   offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);
	flush_write_buffers();
}
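
/*
 * Example sketch: when only part of a mapping changes hands (say one
 * packet inside a larger receive buffer), the range variants avoid
 * syncing the whole mapping. pkt_off/pkt_len are illustrative.
 *
 *	dma_sync_single_range_for_cpu(dev, bus, pkt_off, pkt_len,
 *				      DMA_FROM_DEVICE);
 *	... read the packet ...
 *	dma_sync_single_range_for_device(dev, bus, pkt_off, pkt_len,
 *					 DMA_FROM_DEVICE);
 */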

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
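
/*
 * Example sketch: mapping a scatterlist. The count returned by
 * dma_map_sg() may be smaller than nents if the IOMMU merged adjacent
 * entries (see iommu_merge above); the merged segments are what the
 * hardware should be programmed with. dma_unmap_sg() still takes the
 * original nents. program_segment() is a hypothetical helper.
 *
 *	int i, n = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < n; i++)
 *		program_segment(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */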

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}
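
/*
 * Example sketch: a driver can use the reported alignment to size
 * DMA-safe buffers so they do not share a cache line with unrelated
 * data.
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 *	void *buf = kmalloc(sz, GFP_KERNEL);
 */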

/* x86-64 DMA memory is cache-coherent, so every handle is consistent */
#define dma_is_consistent(h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */