#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
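
/*
 * With CONFIG_IOMMU_DEBUG the defaults below flip: every mapping is
 * forced through the IOMMU and an overflow panics instead of failing,
 * so mapping bugs show up immediately rather than as silent corruption.
 */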
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
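
/*
 * Early reservation of memory below 4GB on 64-bit: once bootmem hands
 * out all low memory, a software IOMMU has nowhere to place its bounce
 * buffers. Reserve a chunk (128MB by default, tunable via the
 * "dma32_size=" boot option) and release it again in pci_iommu_alloc()
 * right before the IOMMUs claim their memory.
 */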
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	/* Nothing to reserve if all memory fits below 4GB anyway */
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for the reason why
	 * 512M is used as the allocation goal
	 */
	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
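
/* Undo dma32_reserve_bootmem(); called from pci_iommu_alloc() below. */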
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
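
/*
 * Detect the available IOMMUs. Each detect routine records what it
 * found (iommu_detected and friends) so the later ones and the
 * swiotlb fallback can back off once a unit has been claimed.
 */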
void __init pci_iommu_alloc(void)
{
	/* free the range so the IOMMU can get some range below 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
#endif

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance past this option to the next comma */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
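
/*
 * Per-device coherent memory (32-bit only here): a driver can declare
 * a fixed bus-address region with dma_declare_coherent_memory(), and
 * dma_alloc_coherent() will then carve allocations out of it with a
 * page-granularity bitmap instead of going to the page allocator.
 */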
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
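
/*
 * Reserve a specific range inside a declared region, for callers that
 * need a buffer at a fixed device address. Returns the kernel virtual
 * address of the range, or an ERR_PTR() on failure.
 */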
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
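
/*
 * Try to satisfy an allocation from the device's declared memory.
 * Returns nonzero when the device has a declared region; the caller
 * then uses *ret (NULL means the region could not satisfy the request).
 */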
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						     order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */
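
/*
 * Check whether a device can address memory under the given mask.
 * This is also where the global DAC (64-bit PCI addressing) policy is
 * applied: "nodac"/"usedac" and the VIA quirk below drive forbid_dac.
 */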
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

/* Allocate DMA memory on node near device */
static noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}
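
/*
 * The strategy below: allocate from the smallest zone that satisfies
 * coherent_dma_mask, retry with a stricter zone if the result lands
 * above the mask, and finally fall back to the IOMMU's alloc_coherent
 * or map_simple hook before giving up.
 */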
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;
	int noretry = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &fallback_dev;
		gfp |= GFP_DMA;
	}
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

#ifdef CONFIG_X86_64
	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
		gfp |= GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
#endif

 again:
	page = dma_alloc_pages(dev,
		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
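
/*
 * Late initialization, run once PCI is up so the IOMMU drivers can
 * bind to their devices. At most one of these installs dma_ops;
 * no_iommu_init() is the fallback when nothing else has set them.
 */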
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. "
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif