linux-2.6: include/asm-i386/dma-mapping.h
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

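/*
 * i386 DMA is cache-coherent, so "noncoherent" allocations need no
 * special handling: they simply alias the coherent ones.
 */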
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

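/*
 * dma_alloc_coherent() hands back both a kernel virtual address and a
 * bus address usable by the device.  Sketch of typical use ("dev" and
 * the RING_BYTES descriptor ring are hypothetical):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */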
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

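/*
 * Streaming mappings: with no IOMMU on i386, mapping is just a
 * virt_to_phys() translation.  flush_write_buffers() makes sure any
 * CPU writes to the buffer reach memory before the device reads it.
 * Sketch of typical use ("dev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... tell the device to DMA from "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */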
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

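/*
 * Map a scatterlist: each entry gets the physical address of its page
 * plus the offset within it.  Returns the number of entries actually
 * mapped (always nents here, since nothing can fail without an IOMMU).
 */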
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

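/*
 * dma_map_page()/dma_unmap_page() are the page-based equivalents of
 * dma_map_single()/dma_unmap_single().  Unmapping is a no-op on i386;
 * the BUG_ON just catches callers passing a bogus direction.
 */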
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

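/*
 * Sync operations: because i386 DMA is cache-coherent, syncing for the
 * CPU needs no work at all.  Syncing for the device only needs the CPU
 * write buffers flushed so earlier stores reach memory before the
 * device looks at it.
 */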
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

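/*
 * Mapping can never fail on i386 (there is no IOMMU to run out of
 * space), so dma_mapping_error() always reports success.
 */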
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

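/*
 * dma_set_mask() is what a driver calls (usually once, at probe time)
 * to declare how many address bits its device can drive.  Sketch, with
 * a hypothetical PCI device "pdev":
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */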
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

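/*
 * All memory reachable by DMA is consistent on i386, so
 * dma_is_consistent() is constant and dma_cache_sync() only needs the
 * usual write-buffer flush.
 */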
#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

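/*
 * Drivers for devices with their own addressable memory (e.g. a video
 * framebuffer) can declare that region to the DMA API and have
 * dma_alloc_coherent() carve allocations out of it.  These three hooks
 * manage such a region; they are implemented out of line in arch code.
 */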
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif