/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses on
 * systems with more than 4GB of memory.
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>
unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;		/* Remapping table */
/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   has also been seen with QLogic at least). */
int iommu_fullflush = 1;
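/* Tunable at boot time: see the "fullflush"/"nofullflush" options handled in
   gart_parse_options() below. */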
/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap;	/* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x)	(((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
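/*
 * Note on the entry layout (as encoded above): bits 31:12 of a GATT entry
 * hold physical address bits 31:12, bits 11:4 hold physical address bits
 * 39:32, and bits 1:0 are the valid and coherent flags, so a 40-bit
 * physical address fits into a 32-bit table entry. For example
 * GPTE_ENCODE(0x1234567000ULL) yields 0x34567123, and GPTE_DECODE() of
 * that value gives back 0x1234567000.
 */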
#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
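/*
 * to_pages() counts how many GART pages a mapping touches, including the
 * partial pages at both ends: e.g. with 4K pages, a 0x20 byte buffer that
 * starts at offset 0xff0 into a page needs
 * round_up(0xff0 + 0x20, 0x1000) >> 12 == 2 entries.
 */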
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
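/*
 * The allocator below hands out aperture pages next-fit, starting each
 * search at next_bit. When the search wraps back to the start of the
 * bitmap, need_flush is raised so that flush_gart() flushes the
 * northbridge GARTs before freed entries are reused; with iommu_fullflush
 * set, every mapping forces a flush instead.
 */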
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit, iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0, iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/* Use global flush state to avoid races with multiple flushers. */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
	iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
	iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab) return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
		printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	return high || force_iommu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size >= mask;
	return high;
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	/* phys_mem was advanced a whole page at a time, so its offset
	   within the page is still that of the original buffer. */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
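/*
 * The bus address returned above lies inside the GART aperture; device DMA
 * to it is translated by the northbridge through the GATT entries written
 * in the loop, while the CPU keeps using the regular direct mapping of the
 * underlying pages.
 */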
static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
	flush_gart();
	return map;
}
/* Map a single area into the IOMMU */
dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	BUG_ON(dir == DMA_NONE);

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}
/*
 * Free a DMA mapping.
 */
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
		       size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
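/*
 * Addresses outside the remapping window were handed out 1:1 without using
 * the IOMMU, so the early return above simply leaves them alone; the
 * EMERGENCY_PAGES at the start of the window are likewise never freed.
 */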
/* Wrapper for pci_unmap_single working with scatterlists. */
void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	int i;
#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long addr = page_to_phys(s->page) + s->offset;
		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *sg, int start, int stopat,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	int i;

	if (iommu_start == -1)
		return -1;

	for (i = start; i < stopat; i++) {
		struct scatterlist *s = &sg[i];
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(i > start && s->offset);
		if (i == start) {
			*sout = *s;
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}
static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(stopat - start != 1);
		*sout = sg[start];
		sout->dma_length = sg[start].length;
		return 0;
	}
	return __dma_map_cont(sg, start, stopat, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i, out, start;
	unsigned long pages = 0;
	int need = 0, nextneed;

	BUG_ON(dir == DMA_NONE);
	if (nents == 0)
		return 0;

	out = 0;
	start = 0;
	for (i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];
		dma_addr_t addr = page_to_phys(s->page) + s->offset;
		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			struct scatterlist *ps = &sg[i-1];
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(sg, start, i, sg+out, pages,
						 need) < 0)
					goto error;
				out++;
				pages = 0;
				start = i;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
	}
	if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents)
		sg[out].dma_length = 0;
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for (i = 0; i < nents; i++)
		sg[i].dma_address = bad_dma_address;
	return nents;
}
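/*
 * Decide how much of the AGP aperture to use as the IOMMU remapping area.
 * A small result usually means a small BIOS aperture; the warning below
 * suggests raising the aperture size in the BIOS setup.
 */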
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	iommu_size = aper_size;
	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);

	return iommu_size;
}
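/*
 * Read back the aperture that the BIOS (or the aperture fixup code) set up
 * in the northbridge: as decoded below, config register 0x90 bits 3:1 hold
 * the aperture order (size = 32MB << order) and register 0x94 holds bits
 * 39:25 of the aperture base address.
 */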
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;
	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;
	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size >= 0xffffffff || !aper_size)
		aper_base = 0;
	*size = aper_size;
	return aper_base;
}
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;
		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg, ctl;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;	/* base register takes address bits 39:12 in bits 31:4 */
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);
		ctl |= 1;			/* GART enable */
		ctl &= ~((1<<4) | (1<<5));	/* don't disable translation for CPU or IO requests */
		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}
extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};
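/*
 * The sync hooks are left NULL because a GART mapping is not a bounce
 * buffer: the device DMAs straight to the original pages through the
 * aperture, so there is nothing to copy or synchronize per operation.
 */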
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
			       "but IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}
	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
					get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif
	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
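	/*
	 * Aperture layout from here on: the low aper_size - iommu_size bytes
	 * stay available to the AGP driver, the top iommu_size bytes form the
	 * IOMMU remapping window. bad_dma_address points at the
	 * EMERGENCY_PAGES reserved at the start of that window, so a failed
	 * mapping still resolves to aperture space instead of random memory.
	 */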
	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
	/*
	 * Try to work around a bug (thanks to BenH):
	 * set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;
	dma_ops = &gart_dma_ops;
}
void gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}