Pull bugzilla-7897 into release branch
[linux-2.6] arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

/*
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology reserves the term coherent for memory areas whose
 * coherency is maintained by hardware.
 */

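/*
 * The R10000 and R12000 perform speculative loads, so even when the device
 * is not hardware-coherent the caches may need to be maintained again after
 * a DMA transfer has completed.  This helper is true only for a non-coherent
 * device on one of those two CPUs.
 */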
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_data.cputype == CPU_R10000 ||
                current_cpu_data.cputype == CPU_R12000);
}

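/*
 * Allocate zeroed pages for DMA and hand back both the kernel virtual
 * address and the bus address obtained from the platform's
 * plat_map_dma_mem() hook.  The memory stays cached; callers are
 * responsible for any cache maintenance, e.g. via dma_cache_sync().
 */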
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

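/*
 * dma_alloc_coherent() does the same allocation, but on a non-coherent
 * platform the fresh pages are written back and invalidated in the cache
 * and the returned pointer is moved into the uncached address window
 * (KSEG1 on 32-bit kernels) via UNCAC_ADDR(), so CPU and device always
 * see the same data.
 */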
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

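/*
 * Undo dma_alloc_coherent(): if the pointer was remapped uncached at
 * allocation time, convert it back with CAC_ADDR() before the pages are
 * returned to the page allocator.
 */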
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

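/*
 * Perform the cache maintenance required for a streaming mapping:
 * writeback for DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE and
 * writeback-invalidate for DMA_BIDIRECTIONAL.  @addr is a CPU (KSEG0)
 * virtual address.
 */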
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

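/*
 * Map a single buffer for streaming DMA.  For a non-coherent device the
 * buffer is first synchronized with __dma_sync(); the bus address is then
 * produced by the platform's plat_map_dma_mem() hook from <dma-coherence.h>.
 */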
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

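/*
 * Tear down a dma_map_single() mapping.  Only the speculating R10000 /
 * R12000 family needs the cache touched again here; other CPUs were fully
 * dealt with at map time.
 */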
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

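/*
 * A minimal, hypothetical driver-side sketch of the streaming API as
 * implemented above (dev, buf, len, handle and out_free are illustrative
 * names, not part of this file):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	... fill buf ...
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		goto out_free;
 *	... program the device with "handle", wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */

/*
 * Map a scatterlist: each entry is synchronized (when the page has a kernel
 * mapping and the device is non-coherent) and given a bus address via
 * plat_map_dma_mem_page().  Returns the number of entries mapped.
 */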
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
                                  sg->offset;
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

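/*
 * dma_map_page()/dma_unmap_page() are the page-based counterparts of
 * dma_map_single()/dma_unmap_single().  The unmap side only needs cache
 * work when the device may have written to the buffer, i.e. for any
 * direction other than DMA_TO_DEVICE on a non-coherent device.
 */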
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

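/*
 * Tear down a dma_map_sg() mapping, synchronizing each entry that the device
 * may have written to before its bus address is released with
 * plat_unmap_dma_mem().
 */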
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) page_address(sg->page);
                        if (addr)
                                __dma_sync(addr + sg->offset, sg->length,
                                           direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

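/*
 * The dma_sync_single*() helpers below hand ownership of a mapping back and
 * forth between the CPU and the device.  The bus address is converted back
 * to a KSEG0 virtual address (PAGE_OFFSET + physical address) before
 * __dma_sync() does the actual cache maintenance.
 */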
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

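/*
 * Scatterlist variants of the sync operations: each entry is synchronized
 * through its kernel mapping when the device is not hardware-coherent.
 */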
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

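/*
 * The 0x00ffffff check below matches the ISA-style 16 MB region that a
 * GFP_DMA allocation is expected to fall within on this architecture;
 * masks narrower than that cannot be honoured by the GFP_DMA fallback
 * used in the allocators above.
 */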
int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

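/*
 * dma_cache_sync() is the maintenance hook for memory obtained from
 * dma_alloc_noncoherent(): on a non-coherent device the region is simply
 * written back and invalidated regardless of direction.
 */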
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);