#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>		/* cache line size constants */
#include <asm/io.h>		/* virt_to_phys(), flush_write_buffers() */
#include <asm/bug.h>		/* BUG_ON(), WARN_ON() */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

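/*
 * Coherent allocations are implemented out of line.  On i386 all memory
 * is cache-coherent with respect to DMA, so the "noncoherent" variants
 * above simply alias the coherent ones.
 */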
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

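/*
 * Map a single buffer for DMA.  Without an IOMMU the mapping is the
 * identity: the bus address is just the physical address.  The CPU
 * write buffers are flushed so outstanding stores reach memory before
 * the device starts reading.
 */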
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

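/*
 * Unmapping is a no-op beyond sanity checking: there is no IOMMU state
 * to tear down.
 */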
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

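/*
 * Map a scatterlist: each entry's bus address is derived directly from
 * its page and offset.  Returns the number of entries mapped, which is
 * always nents here since the identity mapping can neither fail nor
 * coalesce entries.
 */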
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg->page);
		sg->dma_address = page_to_phys(sg->page) + sg->offset;
	}

	flush_write_buffers();
	return nents;
}

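/*
 * Page-based mapping: again the identity transform, the physical
 * address of the page plus the offset into it.
 */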
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

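/*
 * As with dma_unmap_single(), the unmap operations only validate the
 * direction; nothing was set up that needs undoing.
 */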
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

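/*
 * The dma_sync_* helpers transfer ownership of a mapped buffer between
 * CPU and device.  Syncing for the CPU needs no work (caches are
 * coherent); syncing for the device only requires flushing the CPU
 * write buffers so outstanding stores become visible to the device.
 */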
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

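/*
 * Mapping can never fail in this implementation, so the error check is
 * trivially false.
 */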
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

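/*
 * Nonzero when 64-bit (dual address cycle) addressing must not be used,
 * presumably set by chipset quirk code elsewhere in the kernel.
 */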
extern int forbid_dac;

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	/* Work around chipset bugs */
	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}

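/*
 * Record the device's DMA addressing capability, after checking that
 * the platform can actually honour it.
 */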
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

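/*
 * All memory is consistent on i386, so dma_is_consistent() is always
 * true and dma_cache_sync() only needs to flush the CPU write buffers.
 */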
#define dma_is_consistent(d, h)	(1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

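/*
 * Per-device coherent memory pools: a driver may declare a region of
 * bus-addressable memory from which its coherent allocations are then
 * satisfied.  Implemented out of line.
 */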
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif /* _ASM_I386_DMA_MAPPING_H */