x86: move dma_sync_single_for_device to common header
include/asm-x86/dma-mapping_64.h

#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

extern dma_addr_t bad_dma_address;
extern int iommu_merge;

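/*
 * Report whether a dma_addr_t returned by a mapping routine signals a
 * failed mapping; an IOMMU backend may supply its own check via dma_ops.
 */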
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

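/*
 * x86-64 memory is always DMA-coherent, so the "noncoherent" allocation
 * interfaces simply alias the coherent ones.
 */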
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);

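/*
 * There is no highmem on x86-64, so every page has a kernel virtual
 * address and page mappings can be layered on dma_map_single().
 */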
#define dma_map_page(dev,page,offset,size,dir) \
        dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

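/*
 * The sync helpers transfer buffer ownership between CPU and device:
 * they let the active dma_ops backend (for example, swiotlb bounce
 * buffering) do its work, then drain the CPU write buffers.
 */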
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
                                                   offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}

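/* DMA on x86 is cache-coherent, so every allocation is consistent. */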
#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

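/*
 * No cache maintenance is needed on coherent x86 hardware; making CPU
 * writes visible to the device only requires draining the write buffers.
 */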
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */
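
For context, a minimal sketch (not part of this header or this commit) of the streaming-DMA ownership handoff that these helpers, together with the dma_sync_single_for_cpu()/dma_sync_single_for_device() pair now in the common header, implement. The names dev, buf, len, and process_packet() are hypothetical driver state:

/* Hypothetical driver fragment illustrating CPU/device ownership handoff. */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

if (dma_mapping_error(handle))
        return -ENOMEM;

/* ... the device DMAs data into buf ... */

/* Take ownership back before the CPU reads the data. */
dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
process_packet(buf, len);

/* Return ownership to the device for the next transfer. */
dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

/* ... */
dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);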