linux-2.6/include/asm-sh/dma-mapping.h
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

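/*
 * Every mask is claimed to be supported; sh places no per-device
 * restriction on DMA addressing capability here.
 */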
#define dma_supported(dev, mask)	(1)

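/*
 * Accept any mask that dma_supported() allows (which, per the above,
 * is any mask at all); fail with -EIO only if the device carries no
 * dma_mask pointer.
 */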
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

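/*
 * Coherent allocation/free and the cache maintenance primitive are
 * implemented out of line in the architecture's DMA support code.
 */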
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

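/*
 * Non-coherent allocations simply alias the coherent ones, and all
 * memory is reported as consistent.
 */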
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

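/*
 * Mapping a single buffer is a cache sync plus a virt-to-phys
 * translation; on coherent PCI (CONFIG_SH_PCIDMA_NONCOHERENT not set)
 * the sync is skipped.  No per-mapping state is kept, so
 * dma_unmap_single() below is a no-op.
 *
 * Typical driver usage (a sketch; buf/len are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 */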
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_phys(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

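/*
 * Scatterlist mapping walks the list, syncing each entry when the bus
 * is not coherent, and fills in sg->dma_address from the physical
 * address.  As with single mappings, unmap is a no-op.
 */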
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

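/*
 * Page mappings are expressed in terms of dma_map_single(), which
 * assumes page_address() is valid for the page (i.e. no highmem).
 */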
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

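/*
 * Sync operations mirror the map path: a no-op for coherent PCI,
 * otherwise a cache sync of the region behind the DMA handle.
 */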
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

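/*
 * Scatterlist sync repeats the cache maintenance per entry; note that
 * it also rewrites sg->dma_address, exactly as dma_map_sg() does.
 */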
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
	}
}

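/*
 * The for_cpu/for_device variants are identical here: with no bounce
 * buffering, the same cache maintenance serves both directions of
 * ownership transfer.
 */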
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family defines its own L1_CACHE_SHIFT, and
	 * L1_CACHE_BYTES is derived from it, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

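/*
 * A DMA address of zero is treated as the error marker; the map
 * routines above cannot otherwise fail.
 */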
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

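/*
 * Per-device coherent memory pools: a device may declare a region of
 * bus address space to back its coherent allocations.
 */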
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif /* __ASM_SH_DMA_MAPPING_H */