x86: move dma_map_page and dma_unmap_page to common header
include/asm-x86/dma-mapping.h
#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
        int             (*mapping_error)(dma_addr_t dma_addr);
        void *          (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        /* set when bus addresses are physical addresses (no IOMMU) */
        int             is_phys;
};

extern const struct dma_mapping_ops *dma_ops;

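/*
 * Illustrative sketch, not part of this header: the rough shape of a
 * trivial 1:1 (no-IOMMU) backend behind the ops table above.  The
 * "example_" names are hypothetical; the real nommu, swiotlb and GART
 * implementations live elsewhere in arch code.
 */
static dma_addr_t example_map_single(struct device *hwdev, void *ptr,
                                     size_t size, int direction)
{
        return virt_to_phys(ptr);       /* no IOMMU: bus address == physical */
}

static const struct dma_mapping_ops example_dma_ops = {
        .map_single     = example_map_single,
        .is_phys        = 1,            /* no address translation happens */
};
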
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);

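/*
 * Illustrative only: a typical coherent allocation for a descriptor
 * ring.  The device and CPU can both access the buffer without explicit
 * sync calls.  The PAGE_SIZE ring and the surrounding function are made
 * up for the example.
 */
static int example_ring_setup(struct device *dev)
{
        dma_addr_t ring_bus;
        void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);

        if (!ring)
                return -ENOMEM;
        /* ... hand ring_bus to the device, use 'ring' from the CPU ... */
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
        return 0;
}
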
#ifdef CONFIG_X86_32
# include "dma-mapping_32.h"
#else
# include "dma-mapping_64.h"
#endif

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, addr, size, direction);
}

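/*
 * Illustrative only: the canonical streaming pattern around a single
 * transfer.  'device_start_tx' stands in for whatever kicks the
 * hardware and is hypothetical; DMA_TO_DEVICE comes from
 * <linux/dma-mapping.h> and dma_mapping_error() from the arch
 * sub-headers.  The CPU must not touch 'buf' between map and unmap.
 */
static int example_single_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(bus))
                return -EIO;
        /* device_start_tx(dev, bus, len); ... wait for completion ... */
        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
        return 0;
}
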
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->unmap_sg)
                dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

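/*
 * Illustrative only: mapping a scatterlist.  dma_map_sg() may coalesce
 * entries, so the device must be programmed with the returned count,
 * not the original nents; unmapping, however, takes the original nents.
 * for_each_sg() and sg_dma_address()/sg_dma_len() come from
 * <linux/scatterlist.h>.
 */
static int example_map_list(struct device *dev, struct scatterlist *sgl,
                            int nents)
{
        struct scatterlist *sg;
        int i, mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
                return -ENOMEM;
        for_each_sg(sgl, sg, mapped, i) {
                /* program sg_dma_address(sg) / sg_dma_len(sg) into the device */
        }
        /* ... run the transfer ... */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);   /* original nents */
        return 0;
}
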
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        /* flush_write_buffers() is a no-op on most x86 processors */
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

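/*
 * Illustrative only: a buffer that stays mapped across many transfers.
 * Ownership bounces between CPU and device via the sync calls; reading
 * the buffer without first syncing for the CPU is a bug.
 */
static void example_rx_peek(struct device *dev, dma_addr_t bus, size_t len)
{
        dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
        /* CPU may now read the buffer ... */
        dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
        /* the device owns the buffer again */
}
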
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                                   size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);
        flush_write_buffers();
}

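/*
 * Illustrative only: syncing just a header's worth of bytes inside a
 * larger mapping, a common pattern in network drivers.  'hdr_len' is a
 * made-up example parameter.
 */
static void example_peek_header(struct device *dev, dma_addr_t bus,
                                size_t hdr_len)
{
        dma_sync_single_range_for_cpu(dev, bus, 0, hdr_len, DMA_FROM_DEVICE);
        /* inspect the packet header ... */
        dma_sync_single_range_for_device(dev, bus, 0, hdr_len,
                                         DMA_FROM_DEVICE);
}
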
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

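/*
 * Illustrative only: the scatterlist analogue of the single-buffer
 * syncs above, for an sg list that remains mapped across transactions.
 */
static void example_sg_reuse(struct device *dev, struct scatterlist *sgl,
                             int nelems)
{
        dma_sync_sg_for_cpu(dev, sgl, nelems, DMA_FROM_DEVICE);
        /* CPU inspects the buffers ... */
        dma_sync_sg_for_device(dev, sgl, nelems, DMA_FROM_DEVICE);
}
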
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        return dma_map_single(dev, page_address(page) + offset, size,
                              direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

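/*
 * Illustrative only: mapping a sub-range of a page, e.g. a fragment in
 * zero-copy I/O.  As the definitions above show, on x86 this simply
 * falls through to dma_map_single() on the page's kernel virtual
 * address.  The function and its callers are hypothetical.
 */
static int example_page_tx(struct device *dev, struct page *page,
                           size_t offset, size_t len)
{
        dma_addr_t bus = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);

        if (dma_mapping_error(bus))
                return -EIO;
        /* ... start the transfer, wait for completion ... */
        dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
        return 0;
}
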
#endif /* _ASM_DMA_MAPPING_H_ */