/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>
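
/*
 * These are the DMA API entry points for fully cache-coherent MIPS
 * machines: a DMA address is just a physical address and no cache
 * maintenance is needed around transfers, so most of the operations
 * below do nothing beyond sanity-checking their arguments.
 */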
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
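
/*
 * On a coherent machine a "coherent" allocation is no different from a
 * "noncoherent" one, so dma_alloc_coherent() is simply an alias for the
 * function above.
 */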
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
	__attribute__((alias("dma_alloc_noncoherent")));

EXPORT_SYMBOL(dma_alloc_coherent);
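
/*
 * Typical driver usage of the allocator pair above (sketch only; "dev",
 * "size" and the surrounding error handling belong to the caller):
 *
 *	dma_addr_t handle;
 *	void *cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	if (cpu_addr == NULL)
 *		return -ENOMEM;
 *	... hand "handle" to the device, use "cpu_addr" from the CPU ...
 *	dma_free_coherent(dev, size, cpu_addr, handle);
 */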
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));

EXPORT_SYMBOL(dma_free_coherent);
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	return __pa(ptr);
}

EXPORT_SYMBOL(dma_map_single);
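
/*
 * Unmapping and the dma_sync_*() operations have no work to do on
 * coherent hardware; each one only sanity-checks the transfer direction.
 */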
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_single);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_unmap_sg);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
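
/*
 * A mapping here is just a physical address, so it can never fail and
 * every address is consistent; hence the two constant results below.
 */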
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);
void dma_cache_sync(void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

EXPORT_SYMBOL(dma_cache_sync);
/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>
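
/*
 * DAC (Dual Address Cycle) lets a 32-bit PCI bus carry 64-bit DMA
 * addresses; the helpers below translate between pages and such
 * addresses, again with no cache maintenance required.
 */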
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);
struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);
unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);
void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
#endif /* CONFIG_PCI */