include/asm-i386/dma-mapping.h
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

/*
 * i386 is fully cache-coherent, so "noncoherent" allocations are just
 * coherent ones.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

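/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * allocate a device-visible buffer with dma_alloc_coherent() and free
 * it again with dma_free_coherent().  "dev" and RING_SIZE are
 * assumptions for illustration only.
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma,
 *                                      GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      (program ring_dma into the device; the CPU uses "ring" directly)
 *      dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */
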
/* No IOMMU on i386: the bus address is the physical address, so a
 * streaming mapping only needs the CPU write buffers flushed. */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}
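
/*
 * Usage sketch (illustrative only): a streaming mapping of a single
 * buffer for device reads.  "dev", "buf" and "len" are assumed driver
 * locals.
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      (tell the device to read len bytes from "handle")
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */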

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}
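
/*
 * Usage sketch (illustrative only): map a scatterlist, queue each
 * element to the device, then unmap.  "dev", "sg" and "nents" are
 * assumed to be set up by the caller.
 *
 *      int i, count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *      for (i = 0; i < count; i++)
 *              (queue sg[i].dma_address and sg[i].length to the device)
 *      dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */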

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

/*
 * i386 caches are coherent with DMA, so syncing back to the CPU is a
 * no-op; syncing for the device only requires a write-buffer flush.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        /* Mappings never fail on i386. */
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must be within a tighter
         * range than GFP_DMA allows.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
147
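/*
 * Usage sketch (illustrative only): probe-time mask negotiation.  A
 * driver asks for the widest mask its device can address and bails
 * out if the platform rejects it; the 32-bit constant below is an
 * example value, not something this header defines.
 *
 *      if (dma_set_mask(dev, 0xffffffffULL))
 *              return -EIO;
 */
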
static inline int
dma_get_cache_alignment(void)
{
        /* There is no easy way to get the cache size on all x86, so
         * return the maximum possible to be safe. */
        return (1 << L1_CACHE_SHIFT_MAX);
}

#define dma_is_consistent(d)    (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif