/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}

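/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a device limited to 32-bit addressing would probe support with
 *
 *	if (!dma_supported(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * which reaches sn_dma_supported() through sn_dma_ops below.
 */
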
/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);

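/*
 * Sketch of a hypothetical caller honoring this function's return
 * convention (nonzero on success, zero if the mask is unsupported):
 *
 *	if (!sn_dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		return -EIO;
 */
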
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_node(node, flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */
	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}

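/*
 * Illustrative use from a hypothetical driver (RING_BYTES and the
 * register offset are invented): allocate a command ring and hand the
 * bus address to the device:
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(lower_32_bits(ring_dma), ioaddr + RING_BASE_LO);
 */
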
/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
				 dma_addr_t dma_handle)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

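/*
 * The matching teardown for the hypothetical ring above would be:
 *
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */
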
/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page containing the region to map
 * @offset: offset into @page of the region to map
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region of @page at @offset for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	void *cpu_addr = page_address(page) + offset;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}

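/*
 * Sketch of a hypothetical streaming-DMA caller (skb and len are
 * invented names):
 *
 *	dma_addr_t busaddr = dma_map_single(&pdev->dev, skb->data, len,
 *					    DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busaddr))
 *		goto drop;
 *
 * dma_map_single() lands here via the .map_page hook in sn_dma_ops.
 */
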
/**
 * sn_dma_unmap_page - unmap a DMA mapped page
 * @dev: device to sync
 * @dma_addr: DMA address to sync
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, dir);
}

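/*
 * The hypothetical caller above would release its mapping with:
 *
 *	dma_unmap_single(&pdev->dev, busaddr, len, DMA_TO_DEVICE);
 */
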
/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
			    int nhwentries, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, dir);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
			 int nhwentries, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;

		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
					phys_addr, sg->length,
					SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
					sg->length, SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}

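/*
 * Sketch of a hypothetical scatter-gather caller (req is an invented
 * name):
 *
 *	int n = dma_map_sg(&pdev->dev, req->sglist, req->nents,
 *			   DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	(program each sg_dma_address()/sg_dma_len() pair, run the I/O)
 *	dma_unmap_sg(&pdev->dev, req->sglist, req->nents, DMA_FROM_DEVICE);
 */
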
/*
 * SN nodes are fully cache coherent, so the dma_sync_* operations have
 * nothing to do beyond sanity checking.
 */
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				       size_t size, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					  size_t size,
					  enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				   int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				      int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_64BIT_MASK;
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
			__IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */
	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));
	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */
	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;
	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
	if (ret == 2)
		return -EINVAL;
	if (ret == 1)
		*val = -1;
	return size;
}

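/*
 * Hypothetical example (port value for illustration only): read one
 * byte from the legacy VGA input-status port:
 *
 *	u32 v;
 *	if (sn_pci_legacy_read(bus, 0x3da, &v, 1) == 1)
 *		(the low byte of v holds the value read)
 */
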
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */
	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));
	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */
	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

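/*
 * Hypothetical example (values for illustration only): write one byte
 * to a legacy I/O port:
 *
 *	if (sn_pci_legacy_write(bus, 0x3c0, 0x20, 1) < 0)
 *		(the write could not be performed)
 */
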
static struct dma_map_ops sn_dma_ops = {
	.alloc_coherent		= sn_dma_alloc_coherent,
	.free_coherent		= sn_dma_free_coherent,
	.map_page		= sn_dma_map_page,
	.unmap_page		= sn_dma_unmap_page,
	.map_sg			= sn_dma_map_sg,
	.unmap_sg		= sn_dma_unmap_sg,
	.sync_single_for_cpu	= sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= sn_dma_sync_sg_for_cpu,
	.sync_single_for_device	= sn_dma_sync_single_for_device,
	.sync_sg_for_device	= sn_dma_sync_sg_for_device,
	.mapping_error		= sn_dma_mapping_error,
	.dma_supported		= sn_dma_supported,
};

void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}

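/*
 * Note: sn_dma_init() installs sn_dma_ops as the global ia64 dma_ops,
 * so it must run during early platform setup, before any driver starts
 * mapping DMA through the generic dma_* interfaces.
 */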