linux-2.6: arch/x86/kernel/pci-swiotlb_64.c
/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

int swiotlb __read_mostly;

/*
 * The map_single op is handed a physical address, but lib/swiotlb.c
 * still expects a kernel virtual address, so convert before forwarding.
 */
static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
                        int direction)
{
        return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}

/* Dispatch table routing the generic DMA API to the bounce-buffer code. */
const struct dma_mapping_ops swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
        .alloc_coherent = swiotlb_alloc_coherent,
        .free_coherent = swiotlb_free_coherent,
        .map_single = swiotlb_map_single_phys,
        .unmap_single = swiotlb_unmap_single,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
        .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .map_sg = swiotlb_map_sg,
        .unmap_sg = swiotlb_unmap_sg,
        .dma_supported = NULL,
};

void __init pci_swiotlb_init(void)
{
        /* don't initialize swiotlb if iommu=off (no_iommu=1) */
        if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
                swiotlb = 1;
        if (swiotlb_force)
                swiotlb = 1;
        if (swiotlb) {
                printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
                swiotlb_init();
                dma_ops = &swiotlb_dma_ops;
        }
}
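
For context, a minimal sketch (not part of this file) of how these ops get exercised: once pci_swiotlb_init() has pointed the global dma_ops at swiotlb_dma_ops, the generic x86-64 dma_map_single() wrapper of this era dispatches through that pointer, passing a physical address just as swiotlb_map_single_phys() expects. The wrapper below is an approximation for illustration, not a verbatim copy of asm/dma-mapping.h.

/*
 * Illustrative only: roughly how a driver's dma_map_single() call
 * reaches swiotlb_map_single_phys() when dma_ops == &swiotlb_dma_ops.
 */
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction)
{
        /* dma_ops was installed by pci_swiotlb_init() above */
        return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}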