#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
	int		is_phys;	/* using phys. to dma. region doesn't
					   mangle addresses */
};
extern struct dma_mapping_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
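
/*
 * Illustrative sketch, not part of the original file: platform/IOMMU code
 * is expected to point 'dma_ops' at a filled-in operations table; the
 * 'my_*' names below are hypothetical placeholders.
 *
 *	static struct dma_mapping_ops my_dma_ops = {
 *		.alloc_coherent	= my_alloc_coherent,
 *		.free_coherent	= my_free_coherent,
 *		.map_single	= my_map_single,
 *		.unmap_single	= my_unmap_single,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= my_unmap_sg,
 *	};
 *
 *	dma_ops = &my_dma_ops;
 */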

#define dma_alloc_coherent(dev, size, handle, gfp)	\
	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)

/* coherent mem. is cheap */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#define dma_free_coherent	platform_dma_free_coherent
static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
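
/*
 * Illustrative use, assumed driver code (not from this file): allocate a
 * coherent buffer, then release it when done.
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */
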
#define dma_map_single_attrs	platform_dma_map_single_attrs
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size, int dir)
{
	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_map_sg_attrs	platform_dma_map_sg_attrs
static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, int dir)
{
	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
				    size_t size, int dir)
{
	dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
}
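
/*
 * Illustrative use, assumed driver code (not from this file): a typical
 * streaming mapping of a kernel buffer 'buf' of 'len' bytes.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 *	... point the device at 'bus' and run the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */
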
#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				int nents, int dir)
{
	dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
}
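
/*
 * Illustrative use, assumed driver code: map a scatterlist and honour the
 * (possibly smaller) entry count the mapping returns; note that unmapping
 * still takes the original 'nents'.
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	... program the device with 'count' entries ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
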
#define dma_sync_single_for_cpu		platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device	platform_dma_sync_single_for_device
#define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
#define dma_mapping_error		platform_dma_mapping_error
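
/*
 * Illustrative use, assumed driver code: give the CPU a consistent view of
 * a long-lived streaming mapping before reading it, then hand the buffer
 * back to the device.
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */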

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
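
/*
 * ia64 has no highmem, so page_address() above is always valid.
 * Illustrative use, assumed driver code:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_TO_DEVICE);
 */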

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

#define dma_supported		platform_dma_supported

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
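
/*
 * Illustrative use, assumed driver code: a device limited to 32-bit
 * addressing should restrict its mask before making any mapping calls.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */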

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	return dma_ops;
}

#endif /* _ASM_IA64_DMA_MAPPING_H */