/* linux-2.6: include/asm-i386/dma-mapping.h */
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

/*
 * i386 memory is cache-coherent from the CPU's point of view, so the
 * "noncoherent" variants simply map to the coherent ones.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

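/*
 * Hedged usage sketch (not part of the original header): a driver
 * might back a small descriptor ring with a coherent allocation.
 * "dev", "ring" and RING_BYTES are hypothetical driver names:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...	(device and CPU share the ring without explicit syncs)
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
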
/*
 * Streaming mappings on i386 are just the physical address; the write
 * buffers are flushed so the device sees up-to-date data.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

/* Unmapping is a no-op here; only the direction is sanity-checked. */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

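/*
 * Hedged usage sketch: one-off streaming DMA.  "dev", "buf" and
 * BUF_BYTES are hypothetical driver names:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, BUF_BYTES, DMA_TO_DEVICE);
 *	...	(point the device at "handle" and start the transfer)
 *	dma_unmap_single(dev, handle, BUF_BYTES, DMA_TO_DEVICE);
 */
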
/*
 * Map a scatterlist.  Each entry is mapped to its own physical
 * address, so no entries are merged and all nents are returned.
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

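/*
 * Hedged usage sketch: scatter-gather mapping.  "dev", "sgl" and NENTS
 * are hypothetical driver names:
 *
 *	int count = dma_map_sg(dev, sgl, NENTS, DMA_FROM_DEVICE);
 *	...	(program the device from sg_dma_address()/sg_dma_len())
 *	dma_unmap_sg(dev, sgl, NENTS, DMA_FROM_DEVICE);
 */
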
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

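/*
 * Hedged usage sketch: mapping part of a page.  "dev", "page" and
 * "off" are hypothetical:
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(dev, page, off, PAGE_SIZE - off, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, addr, PAGE_SIZE - off, DMA_TO_DEVICE);
 */
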
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

/*
 * The CPU side is always coherent on i386, so syncing for the CPU is a
 * no-op; syncing for the device only needs the write buffers flushed.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	flush_write_buffers();
}

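/*
 * Hedged usage sketch: CPU access to a long-lived streaming mapping
 * between device transfers.  "dev", "handle" and BUF_BYTES are
 * hypothetical:
 *
 *	dma_sync_single_for_cpu(dev, handle, BUF_BYTES, DMA_FROM_DEVICE);
 *	...	(CPU reads the received data)
 *	dma_sync_single_for_device(dev, handle, BUF_BYTES, DMA_FROM_DEVICE);
 */
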
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

/* Mappings are plain physical addresses, so they can never fail. */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

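/*
 * Hedged usage sketch: declaring 32-bit addressing from a probe
 * routine.  DMA_32BIT_MASK comes from <linux/dma-mapping.h> of this
 * era; "dev" is hypothetical:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */
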
static inline int
dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86
	 * variants, so return the maximum possible to be safe.
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

/* All memory is DMA-consistent on i386. */
#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

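/*
 * Hedged usage sketch: handing device-local memory to the DMA API so
 * that dma_alloc_coherent() carves from it.  "dev", MEM_BUS_ADDR,
 * MEM_DEV_ADDR and MEM_BYTES are hypothetical; DMA_MEMORY_MAP comes
 * from <linux/dma-mapping.h> and the call returns 0 on failure:
 *
 *	if (!dma_declare_coherent_memory(dev, MEM_BUS_ADDR, MEM_DEV_ADDR,
 *					 MEM_BYTES, DMA_MEMORY_MAP))
 *		return -ENXIO;
 *	...
 *	dma_release_declared_memory(dev);
 */
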
#endif