x86: move dma_sync_single_range_for_cpu to common header
[linux-2.6] include/asm-x86/dma-mapping_32.h
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

/*
 * Streaming mappings: without an IOMMU, i386 bus addresses are simply
 * physical addresses, so mapping a page is page_to_phys() plus the
 * offset, and unmapping has nothing to undo.
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

/*
 * x86 is cache-coherent for DMA: syncing a buffer toward the device
 * only needs a CPU write-buffer flush, and the CPU-side sg sync below
 * is empty.
 */
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}

/* The 1:1 mappings above cannot fail, so there is never an error. */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

extern int forbid_dac;

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	/* Work around chipset bugs */
	if (forbid_dac > 0 && mask > 0xffffffffULL)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86,
	 * so return the maximum possible to be safe.
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d, h) (1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif
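
The dma_sync_single_range_for_cpu() inline that used to live in this file now comes from the shared x86 header, per the subject of this change. As a rough sketch of the shape it takes there, assuming the dma_ops dispatch pattern used by the unified include/asm-x86/dma-mapping.h (the exact hook name and details may differ):

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	/*
	 * Let the active DMA implementation (nommu, swiotlb, GART, ...)
	 * do any per-range work; the sync_single_range_for_cpu hook is
	 * an assumed dma_ops member here, not confirmed by this patch.
	 */
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);
	flush_write_buffers();
}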
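
For context, a hypothetical driver fragment (my_dev, rx_page and the map_failed label are illustrative, not part of this patch) showing how the streaming-DMA inlines above are typically used for a device-to-memory transfer:

	struct page *rx_page = alloc_page(GFP_KERNEL);
	dma_addr_t bus;

	/* Hand the page to the device for writing. */
	bus = dma_map_page(my_dev, rx_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(bus))
		goto map_failed;

	/* ... the device DMAs data into the page ... */

	/* Make the device's writes visible to the CPU before reading. */
	dma_sync_single_range_for_cpu(my_dev, bus, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);

	/* Done with the buffer as far as the device is concerned. */
	dma_unmap_page(my_dev, bus, PAGE_SIZE, DMA_FROM_DEVICE);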