2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <asm/cacheflush.h>
41 #include <asm/iommu.h>
44 #define ROOT_SIZE VTD_PAGE_SIZE
45 #define CONTEXT_SIZE VTD_PAGE_SIZE
47 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
48 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
50 #define IOAPIC_RANGE_START (0xfee00000)
51 #define IOAPIC_RANGE_END (0xfeefffff)
52 #define IOVA_START_ADDR (0x1000)
54 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
56 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
58 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
59 #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
60 #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
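/*
 * For example, assuming 4KiB pages (PAGE_SHIFT == 12),
 * IOVA_PFN(DMA_32BIT_MASK) is 0xffffffff >> 12 == 0xfffff, the last
 * page frame reachable with a 32-bit DMA mask; DMA_64BIT_PFN is the
 * same calculation for a full 64-bit mask.
 */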
62 /* global iommu list, set NULL for ignored DMAR units */
63 static struct intel_iommu **g_iommus;
65 static int rwbf_quirk;
70 * 12-63: Context Ptr (12 - (haw-1))
77 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
78 static inline bool root_present(struct root_entry *root)
80 return (root->val & 1);
82 static inline void set_root_present(struct root_entry *root)
86 static inline void set_root_value(struct root_entry *root, unsigned long value)
88 root->val |= value & VTD_PAGE_MASK;
91 static inline struct context_entry *
92 get_context_addr_from_root(struct root_entry *root)
94 return (struct context_entry *)
95 (root_present(root)?phys_to_virt(
96 root->val & VTD_PAGE_MASK) :
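/*
 * Lookup sketch: the root table is indexed by the 8-bit PCI bus number
 * and each present root entry points to a context table indexed by
 * devfn, so a device's context entry is found roughly as:
 *
 *	context = get_context_addr_from_root(&iommu->root_entry[bus]);
 *	entry   = &context[devfn];
 */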
103 * 1: fault processing disable
104 * 2-3: translation type
105 * 12-63: address space root
111 struct context_entry {
116 static inline bool context_present(struct context_entry *context)
118 return (context->lo & 1);
120 static inline void context_set_present(struct context_entry *context)
125 static inline void context_set_fault_enable(struct context_entry *context)
127 context->lo &= (((u64)-1) << 2) | 1;
130 #define CONTEXT_TT_MULTI_LEVEL 0
132 static inline void context_set_translation_type(struct context_entry *context,
135 context->lo &= (((u64)-1) << 4) | 3;
136 context->lo |= (value & 3) << 2;
139 static inline void context_set_address_root(struct context_entry *context,
142 context->lo |= value & VTD_PAGE_MASK;
145 static inline void context_set_address_width(struct context_entry *context,
148 context->hi |= value & 7;
151 static inline void context_set_domain_id(struct context_entry *context,
154 context->hi |= (value & ((1 << 16) - 1)) << 8;
157 static inline void context_clear_entry(struct context_entry *context)
170 * 12-63: Host physical address
176 static inline void dma_clear_pte(struct dma_pte *pte)
181 static inline void dma_set_pte_readable(struct dma_pte *pte)
183 pte->val |= DMA_PTE_READ;
186 static inline void dma_set_pte_writable(struct dma_pte *pte)
188 pte->val |= DMA_PTE_WRITE;
191 static inline void dma_set_pte_snp(struct dma_pte *pte)
193 pte->val |= DMA_PTE_SNP;
196 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
198 pte->val = (pte->val & ~3) | (prot & 3);
201 static inline u64 dma_pte_addr(struct dma_pte *pte)
203 return (pte->val & VTD_PAGE_MASK);
206 static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
208 pte->val |= (addr & VTD_PAGE_MASK);
211 static inline bool dma_pte_present(struct dma_pte *pte)
213 return (pte->val & 3) != 0;
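/*
 * Illustrative pte value: 0x0000000012345003 decodes as page frame
 * 0x12345000 (dma_pte_addr()) with both read and write permission bits
 * set in the low two bits, so dma_pte_present() returns true.
 */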
216 /* devices under the same p2p bridge are owned in one domain */
217 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
219 /* domain represents a virtual machine; more than one device
220 * across iommus may be owned by one domain, e.g. a kvm guest.
222 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
225 int id; /* domain id */
226 unsigned long iommu_bmp; /* bitmap of iommus this domain uses */
228 struct list_head devices; /* all devices' list */
229 struct iova_domain iovad; /* iova's that belong to this domain */
231 struct dma_pte *pgd; /* virtual address */
232 spinlock_t mapping_lock; /* page table lock */
233 int gaw; /* max guest address width */
235 /* adjusted guest address width, 0 is level 2 30-bit */
238 int flags; /* flags to find out type of domain */
240 int iommu_coherency;/* indicate coherency of iommu access */
241 int iommu_snooping; /* indicate snooping control feature*/
242 int iommu_count; /* reference count of iommu */
243 spinlock_t iommu_lock; /* protect iommu set in domain */
244 u64 max_addr; /* maximum mapped address */
247 /* PCI domain-device relationship */
248 struct device_domain_info {
249 struct list_head link; /* link to domain siblings */
250 struct list_head global; /* link to global list */
251 u8 bus; /* PCI bus number */
252 u8 devfn; /* PCI devfn number */
253 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
254 struct dmar_domain *domain; /* pointer to domain */
257 static void flush_unmaps_timeout(unsigned long data);
259 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
261 #define HIGH_WATER_MARK 250
262 struct deferred_flush_tables {
264 struct iova *iova[HIGH_WATER_MARK];
265 struct dmar_domain *domain[HIGH_WATER_MARK];
268 static struct deferred_flush_tables *deferred_flush;
270 /* bitmap for indexing intel_iommus */
271 static int g_num_of_iommus;
273 static DEFINE_SPINLOCK(async_umap_flush_lock);
274 static LIST_HEAD(unmaps_to_do);
277 static long list_size;
279 static void domain_remove_dev_info(struct dmar_domain *domain);
281 #ifdef CONFIG_DMAR_DEFAULT_ON
282 int dmar_disabled = 0;
284 int dmar_disabled = 1;
285 #endif /*CONFIG_DMAR_DEFAULT_ON*/
287 static int __initdata dmar_map_gfx = 1;
288 static int dmar_forcedac;
289 static int intel_iommu_strict;
291 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
292 static DEFINE_SPINLOCK(device_domain_lock);
293 static LIST_HEAD(device_domain_list);
295 static struct iommu_ops intel_iommu_ops;
297 static int __init intel_iommu_setup(char *str)
302 if (!strncmp(str, "on", 2)) {
304 printk(KERN_INFO "Intel-IOMMU: enabled\n");
305 } else if (!strncmp(str, "off", 3)) {
307 printk(KERN_INFO "Intel-IOMMU: disabled\n");
308 } else if (!strncmp(str, "igfx_off", 8)) {
311 "Intel-IOMMU: disable GFX device mapping\n");
312 } else if (!strncmp(str, "forcedac", 8)) {
314 "Intel-IOMMU: Forcing DAC for PCI devices\n");
316 } else if (!strncmp(str, "strict", 6)) {
318 "Intel-IOMMU: disable batched IOTLB flush\n");
319 intel_iommu_strict = 1;
322 str += strcspn(str, ",");
328 __setup("intel_iommu=", intel_iommu_setup);
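/*
 * Example boot-time usage of the options parsed above (comma separated
 * on the kernel command line):
 *
 *	intel_iommu=on,igfx_off,strict
 *
 * enables DMA remapping, skips mapping of graphics devices and disables
 * the batched IOTLB flush.
 */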
330 static struct kmem_cache *iommu_domain_cache;
331 static struct kmem_cache *iommu_devinfo_cache;
332 static struct kmem_cache *iommu_iova_cache;
334 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
339 /* trying to avoid low memory issues */
340 flags = current->flags & PF_MEMALLOC;
341 current->flags |= PF_MEMALLOC;
342 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
343 current->flags &= (~PF_MEMALLOC | flags);
348 static inline void *alloc_pgtable_page(void)
353 /* trying to avoid low memory issues */
354 flags = current->flags & PF_MEMALLOC;
355 current->flags |= PF_MEMALLOC;
356 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
357 current->flags &= (~PF_MEMALLOC | flags);
361 static inline void free_pgtable_page(void *vaddr)
363 free_page((unsigned long)vaddr);
366 static inline void *alloc_domain_mem(void)
368 return iommu_kmem_cache_alloc(iommu_domain_cache);
371 static void free_domain_mem(void *vaddr)
373 kmem_cache_free(iommu_domain_cache, vaddr);
376 static inline void * alloc_devinfo_mem(void)
378 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
381 static inline void free_devinfo_mem(void *vaddr)
383 kmem_cache_free(iommu_devinfo_cache, vaddr);
386 struct iova *alloc_iova_mem(void)
388 return iommu_kmem_cache_alloc(iommu_iova_cache);
391 void free_iova_mem(struct iova *iova)
393 kmem_cache_free(iommu_iova_cache, iova);
397 static inline int width_to_agaw(int width);
399 /* calculate agaw for each iommu.
400 * "SAGAW" may be different across iommus; use a default agaw, and
401 * fall back to a smaller supported agaw for iommus that don't support the default.
403 int iommu_calculate_agaw(struct intel_iommu *iommu)
408 sagaw = cap_sagaw(iommu->cap);
409 for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
411 if (test_bit(agaw, &sagaw))
418 /* in native case, each domain is related to only one iommu */
419 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
423 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
425 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
426 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
429 return g_iommus[iommu_id];
432 static void domain_update_iommu_coherency(struct dmar_domain *domain)
436 domain->iommu_coherency = 1;
438 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
439 for (; i < g_num_of_iommus; ) {
440 if (!ecap_coherent(g_iommus[i]->ecap)) {
441 domain->iommu_coherency = 0;
444 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
448 static void domain_update_iommu_snooping(struct dmar_domain *domain)
452 domain->iommu_snooping = 1;
454 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
455 for (; i < g_num_of_iommus; ) {
456 if (!ecap_sc_support(g_iommus[i]->ecap)) {
457 domain->iommu_snooping = 0;
460 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
464 /* Some capabilities may be different across iommus */
465 static void domain_update_iommu_cap(struct dmar_domain *domain)
467 domain_update_iommu_coherency(domain);
468 domain_update_iommu_snooping(domain);
471 static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
473 struct dmar_drhd_unit *drhd = NULL;
476 for_each_drhd_unit(drhd) {
480 for (i = 0; i < drhd->devices_cnt; i++)
481 if (drhd->devices[i] &&
482 drhd->devices[i]->bus->number == bus &&
483 drhd->devices[i]->devfn == devfn)
486 if (drhd->include_all)
493 static void domain_flush_cache(struct dmar_domain *domain,
494 void *addr, int size)
496 if (!domain->iommu_coherency)
497 clflush_cache_range(addr, size);
500 /* Gets context entry for a given bus and devfn */
501 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
504 struct root_entry *root;
505 struct context_entry *context;
506 unsigned long phy_addr;
509 spin_lock_irqsave(&iommu->lock, flags);
510 root = &iommu->root_entry[bus];
511 context = get_context_addr_from_root(root);
513 context = (struct context_entry *)alloc_pgtable_page();
515 spin_unlock_irqrestore(&iommu->lock, flags);
518 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
519 phy_addr = virt_to_phys((void *)context);
520 set_root_value(root, phy_addr);
521 set_root_present(root);
522 __iommu_flush_cache(iommu, root, sizeof(*root));
524 spin_unlock_irqrestore(&iommu->lock, flags);
525 return &context[devfn];
528 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
530 struct root_entry *root;
531 struct context_entry *context;
535 spin_lock_irqsave(&iommu->lock, flags);
536 root = &iommu->root_entry[bus];
537 context = get_context_addr_from_root(root);
542 ret = context_present(&context[devfn]);
544 spin_unlock_irqrestore(&iommu->lock, flags);
548 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
550 struct root_entry *root;
551 struct context_entry *context;
554 spin_lock_irqsave(&iommu->lock, flags);
555 root = &iommu->root_entry[bus];
556 context = get_context_addr_from_root(root);
558 context_clear_entry(&context[devfn]);
559 __iommu_flush_cache(iommu, &context[devfn], \
562 spin_unlock_irqrestore(&iommu->lock, flags);
565 static void free_context_table(struct intel_iommu *iommu)
567 struct root_entry *root;
570 struct context_entry *context;
572 spin_lock_irqsave(&iommu->lock, flags);
573 if (!iommu->root_entry) {
576 for (i = 0; i < ROOT_ENTRY_NR; i++) {
577 root = &iommu->root_entry[i];
578 context = get_context_addr_from_root(root);
580 free_pgtable_page(context);
582 free_pgtable_page(iommu->root_entry);
583 iommu->root_entry = NULL;
585 spin_unlock_irqrestore(&iommu->lock, flags);
588 /* page table handling */
589 #define LEVEL_STRIDE (9)
590 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
592 static inline int agaw_to_level(int agaw)
597 static inline int agaw_to_width(int agaw)
599 return 30 + agaw * LEVEL_STRIDE;
603 static inline int width_to_agaw(int width)
605 return (width - 30) / LEVEL_STRIDE;
608 static inline unsigned int level_to_offset_bits(int level)
610 return (12 + (level - 1) * LEVEL_STRIDE);
613 static inline int address_level_offset(u64 addr, int level)
615 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
618 static inline u64 level_mask(int level)
620 return ((u64)-1 << level_to_offset_bits(level));
623 static inline u64 level_size(int level)
625 return ((u64)1 << level_to_offset_bits(level));
628 static inline u64 align_to_level(u64 addr, int level)
630 return ((addr + level_size(level) - 1) & level_mask(level));
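/*
 * Worked example of the width/agaw/level arithmetic above: the default
 * domain address width of 48 bits gives agaw = (48 - 30) / 9 = 2
 * (agaw_to_width(2) == 48).  Each level resolves LEVEL_STRIDE (9) bits:
 * level 1 covers address bits 12-20, level 2 bits 21-29, level 3 bits
 * 30-38 and level 4 bits 39-47, so a 48-bit domain walks a 4-level
 * table; level_size(1) is 4KiB and level_size(2) is 2MiB.
 */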
633 static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
635 int addr_width = agaw_to_width(domain->agaw);
636 struct dma_pte *parent, *pte = NULL;
637 int level = agaw_to_level(domain->agaw);
641 BUG_ON(!domain->pgd);
643 addr &= (((u64)1) << addr_width) - 1;
644 parent = domain->pgd;
646 spin_lock_irqsave(&domain->mapping_lock, flags);
650 offset = address_level_offset(addr, level);
651 pte = &parent[offset];
655 if (!dma_pte_present(pte)) {
656 tmp_page = alloc_pgtable_page();
659 spin_unlock_irqrestore(&domain->mapping_lock,
663 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
664 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
666 * higher-level tables always set r/w; the last-level page
667 * table controls read/write
669 dma_set_pte_readable(pte);
670 dma_set_pte_writable(pte);
671 domain_flush_cache(domain, pte, sizeof(*pte));
673 parent = phys_to_virt(dma_pte_addr(pte));
677 spin_unlock_irqrestore(&domain->mapping_lock, flags);
681 /* return address's pte at specific level */
682 static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
685 struct dma_pte *parent, *pte = NULL;
686 int total = agaw_to_level(domain->agaw);
689 parent = domain->pgd;
690 while (level <= total) {
691 offset = address_level_offset(addr, total);
692 pte = &parent[offset];
696 if (!dma_pte_present(pte))
698 parent = phys_to_virt(dma_pte_addr(pte));
704 /* clear one page's page table */
705 static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
707 struct dma_pte *pte = NULL;
709 /* get last level pte */
710 pte = dma_addr_level_pte(domain, addr, 1);
714 domain_flush_cache(domain, pte, sizeof(*pte));
718 /* clear last level pte; a tlb flush should follow */
719 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
721 int addr_width = agaw_to_width(domain->agaw);
724 start &= (((u64)1) << addr_width) - 1;
725 end &= (((u64)1) << addr_width) - 1;
726 /* in case it's a partial page */
727 start = PAGE_ALIGN(start);
729 npages = (end - start) / VTD_PAGE_SIZE;
731 /* we don't need lock here, nobody else touches the iova range */
733 dma_pte_clear_one(domain, start);
734 start += VTD_PAGE_SIZE;
738 /* free page table pages. last level pte should already be cleared */
739 static void dma_pte_free_pagetable(struct dmar_domain *domain,
742 int addr_width = agaw_to_width(domain->agaw);
744 int total = agaw_to_level(domain->agaw);
748 start &= (((u64)1) << addr_width) - 1;
749 end &= (((u64)1) << addr_width) - 1;
751 /* we don't need lock here, nobody else touches the iova range */
753 while (level <= total) {
754 tmp = align_to_level(start, level);
755 if (tmp >= end || (tmp + level_size(level) > end))
759 pte = dma_addr_level_pte(domain, tmp, level);
762 phys_to_virt(dma_pte_addr(pte)));
764 domain_flush_cache(domain, pte, sizeof(*pte));
766 tmp += level_size(level);
771 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
772 free_pgtable_page(domain->pgd);
778 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
780 struct root_entry *root;
783 root = (struct root_entry *)alloc_pgtable_page();
787 __iommu_flush_cache(iommu, root, ROOT_SIZE);
789 spin_lock_irqsave(&iommu->lock, flags);
790 iommu->root_entry = root;
791 spin_unlock_irqrestore(&iommu->lock, flags);
796 static void iommu_set_root_entry(struct intel_iommu *iommu)
802 addr = iommu->root_entry;
804 spin_lock_irqsave(&iommu->register_lock, flag);
805 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
807 cmd = iommu->gcmd | DMA_GCMD_SRTP;
808 writel(cmd, iommu->reg + DMAR_GCMD_REG);
810 /* Make sure hardware complete it */
811 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
812 readl, (sts & DMA_GSTS_RTPS), sts);
814 spin_unlock_irqrestore(&iommu->register_lock, flag);
817 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
822 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
824 val = iommu->gcmd | DMA_GCMD_WBF;
826 spin_lock_irqsave(&iommu->register_lock, flag);
827 writel(val, iommu->reg + DMAR_GCMD_REG);
829 /* Make sure hardware complete it */
830 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
831 readl, (!(val & DMA_GSTS_WBFS)), val);
833 spin_unlock_irqrestore(&iommu->register_lock, flag);
836 /* return value determines whether we need a write buffer flush */
837 static int __iommu_flush_context(struct intel_iommu *iommu,
838 u16 did, u16 source_id, u8 function_mask, u64 type,
839 int non_present_entry_flush)
845 * In the non-present entry flush case, if hardware doesn't cache
846 * non-present entries we do nothing, and if hardware does cache
847 * non-present entries, we flush entries of domain 0 (the domain id
848 * used to cache any non-present entries)
850 if (non_present_entry_flush) {
851 if (!cap_caching_mode(iommu->cap))
858 case DMA_CCMD_GLOBAL_INVL:
859 val = DMA_CCMD_GLOBAL_INVL;
861 case DMA_CCMD_DOMAIN_INVL:
862 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
864 case DMA_CCMD_DEVICE_INVL:
865 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
866 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
873 spin_lock_irqsave(&iommu->register_lock, flag);
874 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
876 /* Make sure hardware complete it */
877 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
878 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
880 spin_unlock_irqrestore(&iommu->register_lock, flag);
882 /* flush context entry will implicitly flush write buffer */
886 /* return value determines whether we need a write buffer flush */
887 static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
888 u64 addr, unsigned int size_order, u64 type,
889 int non_present_entry_flush)
891 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
892 u64 val = 0, val_iva = 0;
896 * In the non-present entry flush case, if hardware doesn't cache
897 * non-present entries we do nothing, and if hardware does cache
898 * non-present entries, we flush entries of domain 0 (the domain id
899 * used to cache any non-present entries)
901 if (non_present_entry_flush) {
902 if (!cap_caching_mode(iommu->cap))
909 case DMA_TLB_GLOBAL_FLUSH:
910 /* global flush doesn't need set IVA_REG */
911 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
913 case DMA_TLB_DSI_FLUSH:
914 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
916 case DMA_TLB_PSI_FLUSH:
917 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
918 /* Note: always flush non-leaf currently */
919 val_iva = size_order | addr;
924 /* Note: set drain read/write */
927 * This is probably just to be extra safe. Looks like we can
928 * ignore it without any impact.
930 if (cap_read_drain(iommu->cap))
931 val |= DMA_TLB_READ_DRAIN;
933 if (cap_write_drain(iommu->cap))
934 val |= DMA_TLB_WRITE_DRAIN;
936 spin_lock_irqsave(&iommu->register_lock, flag);
937 /* Note: Only uses first TLB reg currently */
939 dmar_writeq(iommu->reg + tlb_offset, val_iva);
940 dmar_writeq(iommu->reg + tlb_offset + 8, val);
942 /* Make sure hardware complete it */
943 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
944 dmar_readq, (!(val & DMA_TLB_IVT)), val);
946 spin_unlock_irqrestore(&iommu->register_lock, flag);
948 /* check IOTLB invalidation granularity */
949 if (DMA_TLB_IAIG(val) == 0)
950 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
951 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
952 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
953 (unsigned long long)DMA_TLB_IIRG(type),
954 (unsigned long long)DMA_TLB_IAIG(val));
955 /* flush iotlb entry will implicitly flush write buffer */
959 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
960 u64 addr, unsigned int pages, int non_present_entry_flush)
964 BUG_ON(addr & (~VTD_PAGE_MASK));
967 /* Fall back to domain selective flush if no PSI support */
968 if (!cap_pgsel_inv(iommu->cap))
969 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
971 non_present_entry_flush);
974 * PSI requires page size to be 2 ^ x, and the base address is naturally
975 * aligned to the size
977 mask = ilog2(__roundup_pow_of_two(pages));
978 /* Fall back to domain selective flush if size is too big */
979 if (mask > cap_max_amask_val(iommu->cap))
980 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
981 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
983 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
985 non_present_entry_flush);
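/*
 * Example of the PSI mask computation above: flushing 3 pages gives
 * mask = ilog2(__roundup_pow_of_two(3)) = ilog2(4) = 2, i.e. the
 * hardware invalidates a naturally aligned 4-page (2^2) region that
 * covers the requested range.
 */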
988 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
993 spin_lock_irqsave(&iommu->register_lock, flags);
994 pmen = readl(iommu->reg + DMAR_PMEN_REG);
995 pmen &= ~DMA_PMEN_EPM;
996 writel(pmen, iommu->reg + DMAR_PMEN_REG);
998 /* wait for the protected region status bit to clear */
999 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1000 readl, !(pmen & DMA_PMEN_PRS), pmen);
1002 spin_unlock_irqrestore(&iommu->register_lock, flags);
1005 static int iommu_enable_translation(struct intel_iommu *iommu)
1008 unsigned long flags;
1010 spin_lock_irqsave(&iommu->register_lock, flags);
1011 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
1013 /* Make sure hardware complete it */
1014 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1015 readl, (sts & DMA_GSTS_TES), sts);
1017 iommu->gcmd |= DMA_GCMD_TE;
1018 spin_unlock_irqrestore(&iommu->register_lock, flags);
1022 static int iommu_disable_translation(struct intel_iommu *iommu)
1027 spin_lock_irqsave(&iommu->register_lock, flag);
1028 iommu->gcmd &= ~DMA_GCMD_TE;
1029 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1031 /* Make sure hardware complete it */
1032 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1033 readl, (!(sts & DMA_GSTS_TES)), sts);
1035 spin_unlock_irqrestore(&iommu->register_lock, flag);
1040 static int iommu_init_domains(struct intel_iommu *iommu)
1042 unsigned long ndomains;
1043 unsigned long nlongs;
1045 ndomains = cap_ndoms(iommu->cap);
1046 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1047 nlongs = BITS_TO_LONGS(ndomains);
1049 /* TBD: there might be 64K domains,
1050 * consider a different allocation scheme for future chips
1052 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1053 if (!iommu->domain_ids) {
1054 printk(KERN_ERR "Allocating domain id array failed\n");
1057 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1059 if (!iommu->domains) {
1060 printk(KERN_ERR "Allocating domain array failed\n");
1061 kfree(iommu->domain_ids);
1065 spin_lock_init(&iommu->lock);
1068 * if Caching mode is set, then invalid translations are tagged
1069 * with domainid 0. Hence we need to pre-allocate it.
1071 if (cap_caching_mode(iommu->cap))
1072 set_bit(0, iommu->domain_ids);
1077 static void domain_exit(struct dmar_domain *domain);
1078 static void vm_domain_exit(struct dmar_domain *domain);
1080 void free_dmar_iommu(struct intel_iommu *iommu)
1082 struct dmar_domain *domain;
1084 unsigned long flags;
1086 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1087 for (; i < cap_ndoms(iommu->cap); ) {
1088 domain = iommu->domains[i];
1089 clear_bit(i, iommu->domain_ids);
1091 spin_lock_irqsave(&domain->iommu_lock, flags);
1092 if (--domain->iommu_count == 0) {
1093 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1094 vm_domain_exit(domain);
1096 domain_exit(domain);
1098 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1100 i = find_next_bit(iommu->domain_ids,
1101 cap_ndoms(iommu->cap), i+1);
1104 if (iommu->gcmd & DMA_GCMD_TE)
1105 iommu_disable_translation(iommu);
1108 set_irq_data(iommu->irq, NULL);
1109 /* This will mask the irq */
1110 free_irq(iommu->irq, iommu);
1111 destroy_irq(iommu->irq);
1114 kfree(iommu->domains);
1115 kfree(iommu->domain_ids);
1117 g_iommus[iommu->seq_id] = NULL;
1119 /* if all iommus are freed, free g_iommus */
1120 for (i = 0; i < g_num_of_iommus; i++) {
1125 if (i == g_num_of_iommus)
1128 /* free context mapping */
1129 free_context_table(iommu);
1132 static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
1135 unsigned long ndomains;
1136 struct dmar_domain *domain;
1137 unsigned long flags;
1139 domain = alloc_domain_mem();
1143 ndomains = cap_ndoms(iommu->cap);
1145 spin_lock_irqsave(&iommu->lock, flags);
1146 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1147 if (num >= ndomains) {
1148 spin_unlock_irqrestore(&iommu->lock, flags);
1149 free_domain_mem(domain);
1150 printk(KERN_ERR "IOMMU: no free domain ids\n");
1154 set_bit(num, iommu->domain_ids);
1156 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1157 set_bit(iommu->seq_id, &domain->iommu_bmp);
1159 iommu->domains[num] = domain;
1160 spin_unlock_irqrestore(&iommu->lock, flags);
1165 static void iommu_free_domain(struct dmar_domain *domain)
1167 unsigned long flags;
1168 struct intel_iommu *iommu;
1170 iommu = domain_get_iommu(domain);
1172 spin_lock_irqsave(&iommu->lock, flags);
1173 clear_bit(domain->id, iommu->domain_ids);
1174 spin_unlock_irqrestore(&iommu->lock, flags);
1177 static struct iova_domain reserved_iova_list;
1178 static struct lock_class_key reserved_alloc_key;
1179 static struct lock_class_key reserved_rbtree_key;
1181 static void dmar_init_reserved_ranges(void)
1183 struct pci_dev *pdev = NULL;
1188 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1190 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1191 &reserved_alloc_key);
1192 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1193 &reserved_rbtree_key);
1195 /* IOAPIC ranges shouldn't be accessed by DMA */
1196 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1197 IOVA_PFN(IOAPIC_RANGE_END));
1199 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1201 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1202 for_each_pci_dev(pdev) {
1205 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1206 r = &pdev->resource[i];
1207 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1211 size = r->end - addr;
1212 size = PAGE_ALIGN(size);
1213 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1214 IOVA_PFN(size + addr) - 1);
1216 printk(KERN_ERR "Reserve iova failed\n");
1222 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1224 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1227 static inline int guestwidth_to_adjustwidth(int gaw)
1230 int r = (gaw - 12) % 9;
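/*
 * The (gaw - 12) % 9 remainder above reflects that an n-level page
 * table covers 12 + n * 9 address bits, so only guest widths with a
 * remainder of 0 map exactly onto whole levels; e.g. a 48-bit guest
 * width needs no adjustment, while a 36-bit one presumably gets rounded
 * up to the next such width, 39 bits.
 */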
1241 static int domain_init(struct dmar_domain *domain, int guest_width)
1243 struct intel_iommu *iommu;
1244 int adjust_width, agaw;
1245 unsigned long sagaw;
1247 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1248 spin_lock_init(&domain->mapping_lock);
1249 spin_lock_init(&domain->iommu_lock);
1251 domain_reserve_special_ranges(domain);
1253 /* calculate AGAW */
1254 iommu = domain_get_iommu(domain);
1255 if (guest_width > cap_mgaw(iommu->cap))
1256 guest_width = cap_mgaw(iommu->cap);
1257 domain->gaw = guest_width;
1258 adjust_width = guestwidth_to_adjustwidth(guest_width);
1259 agaw = width_to_agaw(adjust_width);
1260 sagaw = cap_sagaw(iommu->cap);
1261 if (!test_bit(agaw, &sagaw)) {
1262 /* hardware doesn't support it, choose a bigger one */
1263 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1264 agaw = find_next_bit(&sagaw, 5, agaw);
1268 domain->agaw = agaw;
1269 INIT_LIST_HEAD(&domain->devices);
1271 if (ecap_coherent(iommu->ecap))
1272 domain->iommu_coherency = 1;
1274 domain->iommu_coherency = 0;
1276 if (ecap_sc_support(iommu->ecap))
1277 domain->iommu_snooping = 1;
1279 domain->iommu_snooping = 0;
1281 domain->iommu_count = 1;
1283 /* always allocate the top pgd */
1284 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1287 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1291 static void domain_exit(struct dmar_domain *domain)
1295 /* Domain 0 is reserved, so don't process it */
1299 domain_remove_dev_info(domain);
1301 put_iova_domain(&domain->iovad);
1302 end = DOMAIN_MAX_ADDR(domain->gaw);
1303 end = end & (~PAGE_MASK);
1306 dma_pte_clear_range(domain, 0, end);
1308 /* free page tables */
1309 dma_pte_free_pagetable(domain, 0, end);
1311 iommu_free_domain(domain);
1312 free_domain_mem(domain);
1315 static int domain_context_mapping_one(struct dmar_domain *domain,
1318 struct context_entry *context;
1319 unsigned long flags;
1320 struct intel_iommu *iommu;
1321 struct dma_pte *pgd;
1323 unsigned long ndomains;
1327 pr_debug("Set context mapping for %02x:%02x.%d\n",
1328 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1329 BUG_ON(!domain->pgd);
1331 iommu = device_to_iommu(bus, devfn);
1335 context = device_to_context_entry(iommu, bus, devfn);
1338 spin_lock_irqsave(&iommu->lock, flags);
1339 if (context_present(context)) {
1340 spin_unlock_irqrestore(&iommu->lock, flags);
1347 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
1350 /* find an available domain id for this device in iommu */
1351 ndomains = cap_ndoms(iommu->cap);
1352 num = find_first_bit(iommu->domain_ids, ndomains);
1353 for (; num < ndomains; ) {
1354 if (iommu->domains[num] == domain) {
1359 num = find_next_bit(iommu->domain_ids,
1360 cap_ndoms(iommu->cap), num+1);
1364 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1365 if (num >= ndomains) {
1366 spin_unlock_irqrestore(&iommu->lock, flags);
1367 printk(KERN_ERR "IOMMU: no free domain ids\n");
1371 set_bit(num, iommu->domain_ids);
1372 iommu->domains[num] = domain;
1376 /* Skip top levels of page tables for
1377 * an iommu which has a smaller agaw than the default.
1379 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1380 pgd = phys_to_virt(dma_pte_addr(pgd));
1381 if (!dma_pte_present(pgd)) {
1382 spin_unlock_irqrestore(&iommu->lock, flags);
1388 context_set_domain_id(context, id);
1389 context_set_address_width(context, iommu->agaw);
1390 context_set_address_root(context, virt_to_phys(pgd));
1391 context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
1392 context_set_fault_enable(context);
1393 context_set_present(context);
1394 domain_flush_cache(domain, context, sizeof(*context));
1396 /* it's a non-present to present mapping */
1397 if (iommu->flush.flush_context(iommu, domain->id,
1398 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1399 DMA_CCMD_DEVICE_INVL, 1))
1400 iommu_flush_write_buffer(iommu);
1402 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1404 spin_unlock_irqrestore(&iommu->lock, flags);
1406 spin_lock_irqsave(&domain->iommu_lock, flags);
1407 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1408 domain->iommu_count++;
1409 domain_update_iommu_cap(domain);
1411 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1416 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1419 struct pci_dev *tmp, *parent;
1421 ret = domain_context_mapping_one(domain, pdev->bus->number,
1426 /* dependent device mapping */
1427 tmp = pci_find_upstream_pcie_bridge(pdev);
1430 /* Secondary interface's bus number and devfn 0 */
1431 parent = pdev->bus->self;
1432 while (parent != tmp) {
1433 ret = domain_context_mapping_one(domain, parent->bus->number,
1437 parent = parent->bus->self;
1439 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1440 return domain_context_mapping_one(domain,
1441 tmp->subordinate->number, 0);
1442 else /* this is a legacy PCI bridge */
1443 return domain_context_mapping_one(domain,
1444 tmp->bus->number, tmp->devfn);
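/*
 * Hypothetical topology example for the walk above: for a device at,
 * say, 06:02.0 behind a PCIe-to-PCI bridge, context entries are set for
 * the device itself, for every intermediate PCI bridge on the path
 * upwards, and finally for the PCIe bridge keyed by its secondary bus
 * number and devfn 0 (or by the bridge's own bus/devfn for a legacy PCI
 * bridge), so DMA from any of them resolves to the same domain.
 */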
1447 static int domain_context_mapped(struct pci_dev *pdev)
1450 struct pci_dev *tmp, *parent;
1451 struct intel_iommu *iommu;
1453 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
1457 ret = device_context_mapped(iommu,
1458 pdev->bus->number, pdev->devfn);
1461 /* dependent device mapping */
1462 tmp = pci_find_upstream_pcie_bridge(pdev);
1465 /* Secondary interface's bus number and devfn 0 */
1466 parent = pdev->bus->self;
1467 while (parent != tmp) {
1468 ret = device_context_mapped(iommu, parent->bus->number,
1472 parent = parent->bus->self;
1475 return device_context_mapped(iommu,
1476 tmp->subordinate->number, 0);
1478 return device_context_mapped(iommu,
1479 tmp->bus->number, tmp->devfn);
1483 domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1484 u64 hpa, size_t size, int prot)
1486 u64 start_pfn, end_pfn;
1487 struct dma_pte *pte;
1489 int addr_width = agaw_to_width(domain->agaw);
1491 hpa &= (((u64)1) << addr_width) - 1;
1493 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1496 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1497 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1499 while (start_pfn < end_pfn) {
1500 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1503 /* We don't need lock here, nobody else
1504 * touches the iova range
1506 BUG_ON(dma_pte_addr(pte));
1507 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1508 dma_set_pte_prot(pte, prot);
1509 if (prot & DMA_PTE_SNP)
1510 dma_set_pte_snp(pte);
1511 domain_flush_cache(domain, pte, sizeof(*pte));
1518 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1523 clear_context_table(iommu, bus, devfn);
1524 iommu->flush.flush_context(iommu, 0, 0, 0,
1525 DMA_CCMD_GLOBAL_INVL, 0);
1526 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1527 DMA_TLB_GLOBAL_FLUSH, 0);
1530 static void domain_remove_dev_info(struct dmar_domain *domain)
1532 struct device_domain_info *info;
1533 unsigned long flags;
1534 struct intel_iommu *iommu;
1536 spin_lock_irqsave(&device_domain_lock, flags);
1537 while (!list_empty(&domain->devices)) {
1538 info = list_entry(domain->devices.next,
1539 struct device_domain_info, link);
1540 list_del(&info->link);
1541 list_del(&info->global);
1543 info->dev->dev.archdata.iommu = NULL;
1544 spin_unlock_irqrestore(&device_domain_lock, flags);
1546 iommu = device_to_iommu(info->bus, info->devfn);
1547 iommu_detach_dev(iommu, info->bus, info->devfn);
1548 free_devinfo_mem(info);
1550 spin_lock_irqsave(&device_domain_lock, flags);
1552 spin_unlock_irqrestore(&device_domain_lock, flags);
1557 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1559 static struct dmar_domain *
1560 find_domain(struct pci_dev *pdev)
1562 struct device_domain_info *info;
1564 /* No lock here, assumes no domain exit in normal case */
1565 info = pdev->dev.archdata.iommu;
1567 return info->domain;
1571 /* domain is initialized */
1572 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1574 struct dmar_domain *domain, *found = NULL;
1575 struct intel_iommu *iommu;
1576 struct dmar_drhd_unit *drhd;
1577 struct device_domain_info *info, *tmp;
1578 struct pci_dev *dev_tmp;
1579 unsigned long flags;
1580 int bus = 0, devfn = 0;
1582 domain = find_domain(pdev);
1586 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1588 if (dev_tmp->is_pcie) {
1589 bus = dev_tmp->subordinate->number;
1592 bus = dev_tmp->bus->number;
1593 devfn = dev_tmp->devfn;
1595 spin_lock_irqsave(&device_domain_lock, flags);
1596 list_for_each_entry(info, &device_domain_list, global) {
1597 if (info->bus == bus && info->devfn == devfn) {
1598 found = info->domain;
1602 spin_unlock_irqrestore(&device_domain_lock, flags);
1603 /* pcie-pci bridge already has a domain, use it */
1610 /* Allocate new domain for the device */
1611 drhd = dmar_find_matched_drhd_unit(pdev);
1613 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1617 iommu = drhd->iommu;
1619 domain = iommu_alloc_domain(iommu);
1623 if (domain_init(domain, gaw)) {
1624 domain_exit(domain);
1628 /* register pcie-to-pci device */
1630 info = alloc_devinfo_mem();
1632 domain_exit(domain);
1636 info->devfn = devfn;
1638 info->domain = domain;
1639 /* This domain is shared by devices under p2p bridge */
1640 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1642 /* pcie-to-pci bridge already has a domain, use it */
1644 spin_lock_irqsave(&device_domain_lock, flags);
1645 list_for_each_entry(tmp, &device_domain_list, global) {
1646 if (tmp->bus == bus && tmp->devfn == devfn) {
1647 found = tmp->domain;
1652 free_devinfo_mem(info);
1653 domain_exit(domain);
1656 list_add(&info->link, &domain->devices);
1657 list_add(&info->global, &device_domain_list);
1659 spin_unlock_irqrestore(&device_domain_lock, flags);
1663 info = alloc_devinfo_mem();
1666 info->bus = pdev->bus->number;
1667 info->devfn = pdev->devfn;
1669 info->domain = domain;
1670 spin_lock_irqsave(&device_domain_lock, flags);
1671 /* somebody is fast */
1672 found = find_domain(pdev);
1673 if (found != NULL) {
1674 spin_unlock_irqrestore(&device_domain_lock, flags);
1675 if (found != domain) {
1676 domain_exit(domain);
1679 free_devinfo_mem(info);
1682 list_add(&info->link, &domain->devices);
1683 list_add(&info->global, &device_domain_list);
1684 pdev->dev.archdata.iommu = info;
1685 spin_unlock_irqrestore(&device_domain_lock, flags);
1688 /* recheck it here, maybe others set it */
1689 return find_domain(pdev);
1692 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1693 unsigned long long start,
1694 unsigned long long end)
1696 struct dmar_domain *domain;
1698 unsigned long long base;
1702 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1703 pci_name(pdev), start, end);
1704 /* page table init */
1705 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1709 /* The address might not be aligned */
1710 base = start & PAGE_MASK;
1712 size = PAGE_ALIGN(size);
1713 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1714 IOVA_PFN(base + size) - 1)) {
1715 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1720 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1721 size, base, pci_name(pdev));
1723 * RMRR range might have overlap with physical memory range,
1726 dma_pte_clear_range(domain, base, base + size);
1728 ret = domain_page_mapping(domain, base, base, size,
1729 DMA_PTE_READ|DMA_PTE_WRITE);
1733 /* context entry init */
1734 ret = domain_context_mapping(domain, pdev);
1738 domain_exit(domain);
1743 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1744 struct pci_dev *pdev)
1746 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1748 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1749 rmrr->end_address + 1);
1752 #ifdef CONFIG_DMAR_GFX_WA
1753 struct iommu_prepare_data {
1754 struct pci_dev *pdev;
1758 static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1759 unsigned long end_pfn, void *datax)
1761 struct iommu_prepare_data *data;
1763 data = (struct iommu_prepare_data *)datax;
1765 data->ret = iommu_prepare_identity_map(data->pdev,
1766 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1771 static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1774 struct iommu_prepare_data data;
1779 for_each_online_node(nid) {
1780 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1787 static void __init iommu_prepare_gfx_mapping(void)
1789 struct pci_dev *pdev = NULL;
1792 for_each_pci_dev(pdev) {
1793 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
1794 !IS_GFX_DEVICE(pdev))
1796 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1798 ret = iommu_prepare_with_active_regions(pdev);
1800 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
1803 #else /* !CONFIG_DMAR_GFX_WA */
1804 static inline void iommu_prepare_gfx_mapping(void)
1810 #ifdef CONFIG_DMAR_FLOPPY_WA
1811 static inline void iommu_prepare_isa(void)
1813 struct pci_dev *pdev;
1816 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1820 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1821 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1824 printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
1825 "floppy might not work\n");
1829 static inline void iommu_prepare_isa(void)
1833 #endif /* !CONFIG_DMAR_FLOPPY_WA */
1835 static int __init init_dmars(void)
1837 struct dmar_drhd_unit *drhd;
1838 struct dmar_rmrr_unit *rmrr;
1839 struct pci_dev *pdev;
1840 struct intel_iommu *iommu;
1846 * initialize and program root entry to not present
1849 for_each_drhd_unit(drhd) {
1852 * lock not needed as this is only incremented in the single
1853 * threaded kernel __init code path; all other accesses are reads
1858 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1861 printk(KERN_ERR "Allocating global iommu array failed\n");
1866 deferred_flush = kzalloc(g_num_of_iommus *
1867 sizeof(struct deferred_flush_tables), GFP_KERNEL);
1868 if (!deferred_flush) {
1874 for_each_drhd_unit(drhd) {
1878 iommu = drhd->iommu;
1879 g_iommus[iommu->seq_id] = iommu;
1881 ret = iommu_init_domains(iommu);
1887 * we could share the same root & context tables
1888 * among all IOMMUs. Need to split it later.
1890 ret = iommu_alloc_root_entry(iommu);
1892 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
1898 * Start from the sane iommu hardware state.
1900 for_each_drhd_unit(drhd) {
1904 iommu = drhd->iommu;
1907 * If the queued invalidation is already initialized by us
1908 * (for example, while enabling interrupt-remapping) then
1909 * things are already rolling from a sane state.
1915 * Clear any previous faults.
1917 dmar_fault(-1, iommu);
1919 * Disable queued invalidation if supported and already enabled
1920 * before OS handover.
1922 dmar_disable_qi(iommu);
1925 for_each_drhd_unit(drhd) {
1929 iommu = drhd->iommu;
1931 if (dmar_enable_qi(iommu)) {
1933 * Queued Invalidate not enabled, use Register Based
1936 iommu->flush.flush_context = __iommu_flush_context;
1937 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1938 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
1940 (unsigned long long)drhd->reg_base_addr);
1942 iommu->flush.flush_context = qi_flush_context;
1943 iommu->flush.flush_iotlb = qi_flush_iotlb;
1944 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
1946 (unsigned long long)drhd->reg_base_addr);
1952 * for each dev attached to rmrr
1954 * locate drhd for dev, alloc domain for dev
1955 * allocate free domain
1956 * allocate page table entries for rmrr
1957 * if context not allocated for bus
1958 * allocate and init context
1959 * set present in root table for this bus
1960 * init context with domain, translation etc
1964 for_each_rmrr_units(rmrr) {
1965 for (i = 0; i < rmrr->devices_cnt; i++) {
1966 pdev = rmrr->devices[i];
1967 /* some BIOSes list non-existent devices in the DMAR table */
1970 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
1973 "IOMMU: mapping reserved region failed\n");
1977 iommu_prepare_gfx_mapping();
1979 iommu_prepare_isa();
1984 * global invalidate context cache
1985 * global invalidate iotlb
1986 * enable translation
1988 for_each_drhd_unit(drhd) {
1991 iommu = drhd->iommu;
1993 iommu_flush_write_buffer(iommu);
1995 ret = dmar_set_interrupt(iommu);
1999 iommu_set_root_entry(iommu);
2001 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
2003 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2005 iommu_disable_protect_mem_regions(iommu);
2007 ret = iommu_enable_translation(iommu);
2014 for_each_drhd_unit(drhd) {
2017 iommu = drhd->iommu;
2024 static inline u64 aligned_size(u64 host_addr, size_t size)
2027 addr = (host_addr & (~PAGE_MASK)) + size;
2028 return PAGE_ALIGN(addr);
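/*
 * Example: aligned_size(0x1234, 0x100) takes the in-page offset 0x234,
 * adds the length 0x100 and page-aligns the sum, returning 0x1000, i.e.
 * the single 4KiB page the buffer actually touches (assuming 4KiB
 * pages).
 */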
2032 iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
2036 /* Make sure it's in range */
2037 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
2038 if (!size || (IOVA_START_ADDR + size > end))
2041 piova = alloc_iova(&domain->iovad,
2042 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
2046 static struct iova *
2047 __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
2048 size_t size, u64 dma_mask)
2050 struct pci_dev *pdev = to_pci_dev(dev);
2051 struct iova *iova = NULL;
2053 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
2054 iova = iommu_alloc_iova(domain, size, dma_mask);
2057 * First try to allocate an io virtual address in
2058 * DMA_32BIT_MASK and if that fails then try allocating
2061 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
2063 iova = iommu_alloc_iova(domain, size, dma_mask);
2067 printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
2074 static struct dmar_domain *
2075 get_valid_domain_for_dev(struct pci_dev *pdev)
2077 struct dmar_domain *domain;
2080 domain = get_domain_for_dev(pdev,
2081 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2084 "Allocating domain for %s failed", pci_name(pdev));
2088 /* make sure context mapping is ok */
2089 if (unlikely(!domain_context_mapped(pdev))) {
2090 ret = domain_context_mapping(domain, pdev);
2093 "Domain context map for %s failed",
2102 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2103 size_t size, int dir, u64 dma_mask)
2105 struct pci_dev *pdev = to_pci_dev(hwdev);
2106 struct dmar_domain *domain;
2107 phys_addr_t start_paddr;
2111 struct intel_iommu *iommu;
2113 BUG_ON(dir == DMA_NONE);
2114 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2117 domain = get_valid_domain_for_dev(pdev);
2121 iommu = domain_get_iommu(domain);
2122 size = aligned_size((u64)paddr, size);
2124 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2128 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2131 * Check if DMAR supports zero-length reads on write only
2134 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2135 !cap_zlr(iommu->cap))
2136 prot |= DMA_PTE_READ;
2137 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2138 prot |= DMA_PTE_WRITE;
2140 * paddr to (paddr + size) might span a partial page; we should map the whole
2141 * page. Note: if two parts of one page are mapped separately, we
2142 * might have two guest addresses mapping to the same host paddr, but this
2143 * is not a big problem
2145 ret = domain_page_mapping(domain, start_paddr,
2146 ((u64)paddr) & PAGE_MASK, size, prot);
2150 /* it's a non-present to present mapping */
2151 ret = iommu_flush_iotlb_psi(iommu, domain->id,
2152 start_paddr, size >> VTD_PAGE_SHIFT, 1);
2154 iommu_flush_write_buffer(iommu);
2156 return start_paddr + ((u64)paddr & (~PAGE_MASK));
2160 __free_iova(&domain->iovad, iova);
2161 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2162 pci_name(pdev), size, (unsigned long long)paddr, dir);
2166 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2167 unsigned long offset, size_t size,
2168 enum dma_data_direction dir,
2169 struct dma_attrs *attrs)
2171 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2172 dir, to_pci_dev(dev)->dma_mask);
2175 static void flush_unmaps(void)
2181 /* just flush them all */
2182 for (i = 0; i < g_num_of_iommus; i++) {
2183 struct intel_iommu *iommu = g_iommus[i];
2187 if (deferred_flush[i].next) {
2188 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2189 DMA_TLB_GLOBAL_FLUSH, 0);
2190 for (j = 0; j < deferred_flush[i].next; j++) {
2191 __free_iova(&deferred_flush[i].domain[j]->iovad,
2192 deferred_flush[i].iova[j]);
2194 deferred_flush[i].next = 0;
2201 static void flush_unmaps_timeout(unsigned long data)
2203 unsigned long flags;
2205 spin_lock_irqsave(&async_umap_flush_lock, flags);
2207 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2210 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2212 unsigned long flags;
2214 struct intel_iommu *iommu;
2216 spin_lock_irqsave(&async_umap_flush_lock, flags);
2217 if (list_size == HIGH_WATER_MARK)
2220 iommu = domain_get_iommu(dom);
2221 iommu_id = iommu->seq_id;
2223 next = deferred_flush[iommu_id].next;
2224 deferred_flush[iommu_id].domain[next] = dom;
2225 deferred_flush[iommu_id].iova[next] = iova;
2226 deferred_flush[iommu_id].next++;
2229 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2233 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
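/*
 * Deferred-unmap summary: unless intel_iommu_strict is set, freed IOVAs
 * are batched per iommu in deferred_flush[] (up to HIGH_WATER_MARK
 * entries), and a 10ms timer kicks flush_unmaps(), which issues one
 * global IOTLB flush per iommu before returning the IOVAs to the
 * allocator, trading a short window of stale IOTLB entries for far
 * fewer invalidations.
 */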
2236 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2237 size_t size, enum dma_data_direction dir,
2238 struct dma_attrs *attrs)
2240 struct pci_dev *pdev = to_pci_dev(dev);
2241 struct dmar_domain *domain;
2242 unsigned long start_addr;
2244 struct intel_iommu *iommu;
2246 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2248 domain = find_domain(pdev);
2251 iommu = domain_get_iommu(domain);
2253 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2257 start_addr = iova->pfn_lo << PAGE_SHIFT;
2258 size = aligned_size((u64)dev_addr, size);
2260 pr_debug("Device %s unmapping: %zx@%llx\n",
2261 pci_name(pdev), size, (unsigned long long)start_addr);
2263 /* clear the whole page */
2264 dma_pte_clear_range(domain, start_addr, start_addr + size);
2265 /* free page tables */
2266 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2267 if (intel_iommu_strict) {
2268 if (iommu_flush_iotlb_psi(iommu,
2269 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
2270 iommu_flush_write_buffer(iommu);
2272 __free_iova(&domain->iovad, iova);
2274 add_unmap(domain, iova);
2276 * queue up the release of the unmap to save the roughly 1/6th of the
2277 * cpu time used up by the iotlb flush operation...
2282 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2285 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2288 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2289 dma_addr_t *dma_handle, gfp_t flags)
2294 size = PAGE_ALIGN(size);
2295 order = get_order(size);
2296 flags &= ~(GFP_DMA | GFP_DMA32);
2298 vaddr = (void *)__get_free_pages(flags, order);
2301 memset(vaddr, 0, size);
2303 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2305 hwdev->coherent_dma_mask);
2308 free_pages((unsigned long)vaddr, order);
2312 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2313 dma_addr_t dma_handle)
2317 size = PAGE_ALIGN(size);
2318 order = get_order(size);
2320 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2321 free_pages((unsigned long)vaddr, order);
2324 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2325 int nelems, enum dma_data_direction dir,
2326 struct dma_attrs *attrs)
2329 struct pci_dev *pdev = to_pci_dev(hwdev);
2330 struct dmar_domain *domain;
2331 unsigned long start_addr;
2335 struct scatterlist *sg;
2336 struct intel_iommu *iommu;
2338 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2341 domain = find_domain(pdev);
2344 iommu = domain_get_iommu(domain);
2346 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2349 for_each_sg(sglist, sg, nelems, i) {
2350 addr = page_to_phys(sg_page(sg)) + sg->offset;
2351 size += aligned_size((u64)addr, sg->length);
2354 start_addr = iova->pfn_lo << PAGE_SHIFT;
2356 /* clear the whole page */
2357 dma_pte_clear_range(domain, start_addr, start_addr + size);
2358 /* free page tables */
2359 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2361 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
2362 size >> VTD_PAGE_SHIFT, 0))
2363 iommu_flush_write_buffer(iommu);
2366 __free_iova(&domain->iovad, iova);
2369 static int intel_nontranslate_map_sg(struct device *hddev,
2370 struct scatterlist *sglist, int nelems, int dir)
2373 struct scatterlist *sg;
2375 for_each_sg(sglist, sg, nelems, i) {
2376 BUG_ON(!sg_page(sg));
2377 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2378 sg->dma_length = sg->length;
2383 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2384 enum dma_data_direction dir, struct dma_attrs *attrs)
2388 struct pci_dev *pdev = to_pci_dev(hwdev);
2389 struct dmar_domain *domain;
2393 struct iova *iova = NULL;
2395 struct scatterlist *sg;
2396 unsigned long start_addr;
2397 struct intel_iommu *iommu;
2399 BUG_ON(dir == DMA_NONE);
2400 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2401 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2403 domain = get_valid_domain_for_dev(pdev);
2407 iommu = domain_get_iommu(domain);
2409 for_each_sg(sglist, sg, nelems, i) {
2410 addr = page_to_phys(sg_page(sg)) + sg->offset;
2411 size += aligned_size((u64)addr, sg->length);
2414 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2416 sglist->dma_length = 0;
2421 * Check if DMAR supports zero-length reads on write only
2424 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2425 !cap_zlr(iommu->cap))
2426 prot |= DMA_PTE_READ;
2427 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2428 prot |= DMA_PTE_WRITE;
2430 start_addr = iova->pfn_lo << PAGE_SHIFT;
2432 for_each_sg(sglist, sg, nelems, i) {
2433 addr = page_to_phys(sg_page(sg)) + sg->offset;
2434 size = aligned_size((u64)addr, sg->length);
2435 ret = domain_page_mapping(domain, start_addr + offset,
2436 ((u64)addr) & PAGE_MASK,
2439 /* clear the page */
2440 dma_pte_clear_range(domain, start_addr,
2441 start_addr + offset);
2442 /* free page tables */
2443 dma_pte_free_pagetable(domain, start_addr,
2444 start_addr + offset);
2446 __free_iova(&domain->iovad, iova);
2449 sg->dma_address = start_addr + offset +
2450 ((u64)addr & (~PAGE_MASK));
2451 sg->dma_length = sg->length;
2455 /* it's a non-present to present mapping */
2456 if (iommu_flush_iotlb_psi(iommu, domain->id,
2457 start_addr, offset >> VTD_PAGE_SHIFT, 1))
2458 iommu_flush_write_buffer(iommu);
2462 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2467 struct dma_map_ops intel_dma_ops = {
2468 .alloc_coherent = intel_alloc_coherent,
2469 .free_coherent = intel_free_coherent,
2470 .map_sg = intel_map_sg,
2471 .unmap_sg = intel_unmap_sg,
2472 .map_page = intel_map_page,
2473 .unmap_page = intel_unmap_page,
2474 .mapping_error = intel_mapping_error,
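/*
 * Once installed as the active dma_ops, these handlers are what route a
 * driver's dma_map_page()/dma_map_sg() style calls through the
 * remapping hardware; devices marked DUMMY_DEVICE_DOMAIN_INFO bypass
 * translation in each handler above.
 */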
2477 static inline int iommu_domain_cache_init(void)
2481 iommu_domain_cache = kmem_cache_create("iommu_domain",
2482 sizeof(struct dmar_domain),
2487 if (!iommu_domain_cache) {
2488 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2495 static inline int iommu_devinfo_cache_init(void)
2499 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2500 sizeof(struct device_domain_info),
2504 if (!iommu_devinfo_cache) {
2505 printk(KERN_ERR "Couldn't create devinfo cache\n");
2512 static inline int iommu_iova_cache_init(void)
2516 iommu_iova_cache = kmem_cache_create("iommu_iova",
2517 sizeof(struct iova),
2521 if (!iommu_iova_cache) {
2522 printk(KERN_ERR "Couldn't create iova cache\n");
2529 static int __init iommu_init_mempool(void)
2532 ret = iommu_iova_cache_init();
2536 ret = iommu_domain_cache_init();
2540 ret = iommu_devinfo_cache_init();
2544 kmem_cache_destroy(iommu_domain_cache);
2546 kmem_cache_destroy(iommu_iova_cache);
2551 static void __init iommu_exit_mempool(void)
2553 kmem_cache_destroy(iommu_devinfo_cache);
2554 kmem_cache_destroy(iommu_domain_cache);
2555 kmem_cache_destroy(iommu_iova_cache);
2559 static void __init init_no_remapping_devices(void)
2561 struct dmar_drhd_unit *drhd;
2563 for_each_drhd_unit(drhd) {
2564 if (!drhd->include_all) {
2566 for (i = 0; i < drhd->devices_cnt; i++)
2567 if (drhd->devices[i] != NULL)
2569 /* ignore DMAR unit if no pci devices exist */
2570 if (i == drhd->devices_cnt)
2578 for_each_drhd_unit(drhd) {
2580 if (drhd->ignored || drhd->include_all)
2583 for (i = 0; i < drhd->devices_cnt; i++)
2584 if (drhd->devices[i] &&
2585 !IS_GFX_DEVICE(drhd->devices[i]))
2588 if (i < drhd->devices_cnt)
2591 /* bypass IOMMU if it is just for gfx devices */
2593 for (i = 0; i < drhd->devices_cnt; i++) {
2594 if (!drhd->devices[i])
2596 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
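/*
 * Note: DUMMY_DEVICE_DOMAIN_INFO is only a marker value.  The DMA-mapping
 * paths skip translation for devices carrying it, typically with a check
 * along the lines of the (assumed) helper below, so a DMAR unit that only
 * covers graphics devices costs nothing once it is flagged as ignored here.
 *
 *	static int iommu_dummy(struct pci_dev *pdev)
 *	{
 *		return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
 *	}
 */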
2601 #ifdef CONFIG_SUSPEND
2602 static int init_iommu_hw(void)
2604 struct dmar_drhd_unit *drhd;
2605 struct intel_iommu *iommu = NULL;
2607 for_each_active_iommu(iommu, drhd)
2609 dmar_reenable_qi(iommu);
2611 for_each_active_iommu(iommu, drhd) {
2612 iommu_flush_write_buffer(iommu);
2614 iommu_set_root_entry(iommu);
2616 iommu->flush.flush_context(iommu, 0, 0, 0,
2617 DMA_CCMD_GLOBAL_INVL, 0);
2618 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2619 DMA_TLB_GLOBAL_FLUSH, 0);
2620 iommu_disable_protect_mem_regions(iommu);
2621 iommu_enable_translation(iommu);
2627 static void iommu_flush_all(void)
2629 struct dmar_drhd_unit *drhd;
2630 struct intel_iommu *iommu;
2632 for_each_active_iommu(iommu, drhd) {
2633 iommu->flush.flush_context(iommu, 0, 0, 0,
2634 DMA_CCMD_GLOBAL_INVL, 0);
2635 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2636 DMA_TLB_GLOBAL_FLUSH, 0);
2640 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
2642 struct dmar_drhd_unit *drhd;
2643 struct intel_iommu *iommu = NULL;
2646 for_each_active_iommu(iommu, drhd) {
2647 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
2649 if (!iommu->iommu_state)
2655 for_each_active_iommu(iommu, drhd) {
2656 iommu_disable_translation(iommu);
2658 spin_lock_irqsave(&iommu->register_lock, flag);
2660 iommu->iommu_state[SR_DMAR_FECTL_REG] =
2661 readl(iommu->reg + DMAR_FECTL_REG);
2662 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
2663 readl(iommu->reg + DMAR_FEDATA_REG);
2664 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
2665 readl(iommu->reg + DMAR_FEADDR_REG);
2666 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
2667 readl(iommu->reg + DMAR_FEUADDR_REG);
2669 spin_unlock_irqrestore(&iommu->register_lock, flag);
2674 for_each_active_iommu(iommu, drhd)
2675 kfree(iommu->iommu_state);
2680 static int iommu_resume(struct sys_device *dev)
2682 struct dmar_drhd_unit *drhd;
2683 struct intel_iommu *iommu = NULL;
2686 if (init_iommu_hw()) {
2687 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
2691 for_each_active_iommu(iommu, drhd) {
2693 spin_lock_irqsave(&iommu->register_lock, flag);
2695 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
2696 iommu->reg + DMAR_FECTL_REG);
2697 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
2698 iommu->reg + DMAR_FEDATA_REG);
2699 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
2700 iommu->reg + DMAR_FEADDR_REG);
2701 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
2702 iommu->reg + DMAR_FEUADDR_REG);
2704 spin_unlock_irqrestore(&iommu->register_lock, flag);
2707 for_each_active_iommu(iommu, drhd)
2708 kfree(iommu->iommu_state);
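/*
 * Only the fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR) are saved
 * and restored by hand.  The root table pointer, context cache and IOTLB
 * are rebuilt from scratch by init_iommu_hw() before the saved registers
 * are written back, so translation resumes with fault reporting wired up
 * exactly as it was before suspend.
 */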
2713 static struct sysdev_class iommu_sysclass = {
2715 .resume = iommu_resume,
2716 .suspend = iommu_suspend,
2719 static struct sys_device device_iommu = {
2720 .cls = &iommu_sysclass,
2723 static int __init init_iommu_sysfs(void)
2727 error = sysdev_class_register(&iommu_sysclass);
2731 error = sysdev_register(&device_iommu);
2733 sysdev_class_unregister(&iommu_sysclass);
2739 static int __init init_iommu_sysfs(void)
2743 #endif /* CONFIG_PM */
2745 int __init intel_iommu_init(void)
2749 if (dmar_table_init())
2752 if (dmar_dev_scope_init())
2756 * Check the need for DMA-remapping initialization now.
2757 	 * The initialization above is also used by interrupt remapping.
2759 if (no_iommu || swiotlb || dmar_disabled)
2762 iommu_init_mempool();
2763 dmar_init_reserved_ranges();
2765 init_no_remapping_devices();
2769 printk(KERN_ERR "IOMMU: dmar init failed\n");
2770 put_iova_domain(&reserved_iova_list);
2771 iommu_exit_mempool();
2775 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2777 init_timer(&unmap_timer);
2779 dma_ops = &intel_dma_ops;
2782 register_iommu(&intel_iommu_ops);
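/*
 * Ordering note: dma_ops is switched to intel_dma_ops only after the
 * mempools, reserved IOVA ranges and DMAR hardware have been set up, so
 * any mapping created earlier still went through the previous (non-IOMMU)
 * ops; register_iommu() then publishes intel_iommu_ops to the generic
 * IOMMU layer (used, for example, by KVM device assignment).
 */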
2787 static int vm_domain_add_dev_info(struct dmar_domain *domain,
2788 struct pci_dev *pdev)
2790 struct device_domain_info *info;
2791 unsigned long flags;
2793 info = alloc_devinfo_mem();
2797 info->bus = pdev->bus->number;
2798 info->devfn = pdev->devfn;
2800 info->domain = domain;
2802 spin_lock_irqsave(&device_domain_lock, flags);
2803 list_add(&info->link, &domain->devices);
2804 list_add(&info->global, &device_domain_list);
2805 pdev->dev.archdata.iommu = info;
2806 spin_unlock_irqrestore(&device_domain_lock, flags);
2811 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
2812 struct pci_dev *pdev)
2814 struct pci_dev *tmp, *parent;
2816 if (!iommu || !pdev)
2819 /* dependent device detach */
2820 tmp = pci_find_upstream_pcie_bridge(pdev);
2821 /* Secondary interface's bus number and devfn 0 */
2823 parent = pdev->bus->self;
2824 while (parent != tmp) {
2825 iommu_detach_dev(iommu, parent->bus->number,
2827 parent = parent->bus->self;
2829 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
2830 iommu_detach_dev(iommu,
2831 tmp->subordinate->number, 0);
2832 else /* this is a legacy PCI bridge */
2833 iommu_detach_dev(iommu,
2834 tmp->bus->number, tmp->devfn);
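/*
 * Rationale: a device behind a PCIe-to-PCI(-X) bridge is seen by the
 * IOMMU under the bridge's secondary bus number with devfn 0, while a
 * legacy PCI bridge forwards requests with its own bus/devfn.  Context
 * entries were therefore installed for every bridge on the path when the
 * device was attached, and must be torn down the same way here.
 */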
2838 static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2839 struct pci_dev *pdev)
2841 struct device_domain_info *info;
2842 struct intel_iommu *iommu;
2843 unsigned long flags;
2845 struct list_head *entry, *tmp;
2847 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
2851 spin_lock_irqsave(&device_domain_lock, flags);
2852 list_for_each_safe(entry, tmp, &domain->devices) {
2853 info = list_entry(entry, struct device_domain_info, link);
2854 if (info->bus == pdev->bus->number &&
2855 info->devfn == pdev->devfn) {
2856 list_del(&info->link);
2857 list_del(&info->global);
2859 info->dev->dev.archdata.iommu = NULL;
2860 spin_unlock_irqrestore(&device_domain_lock, flags);
2862 iommu_detach_dev(iommu, info->bus, info->devfn);
2863 iommu_detach_dependent_devices(iommu, pdev);
2864 free_devinfo_mem(info);
2866 spin_lock_irqsave(&device_domain_lock, flags);
2874 	/* if there are no other devices under the same iommu
2875 	 * owned by this domain, clear this iommu in iommu_bmp,
2876 	 * update the iommu count and coherency
2878 if (device_to_iommu(info->bus, info->devfn) == iommu)
2883 unsigned long tmp_flags;
2884 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2885 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2886 domain->iommu_count--;
2887 domain_update_iommu_cap(domain);
2888 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2891 spin_unlock_irqrestore(&device_domain_lock, flags);
2894 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2896 struct device_domain_info *info;
2897 struct intel_iommu *iommu;
2898 unsigned long flags1, flags2;
2900 spin_lock_irqsave(&device_domain_lock, flags1);
2901 while (!list_empty(&domain->devices)) {
2902 info = list_entry(domain->devices.next,
2903 struct device_domain_info, link);
2904 list_del(&info->link);
2905 list_del(&info->global);
2907 info->dev->dev.archdata.iommu = NULL;
2909 spin_unlock_irqrestore(&device_domain_lock, flags1);
2911 iommu = device_to_iommu(info->bus, info->devfn);
2912 iommu_detach_dev(iommu, info->bus, info->devfn);
2913 iommu_detach_dependent_devices(iommu, info->dev);
2915 		/* clear this iommu in iommu_bmp, update iommu count and coherency */
2918 spin_lock_irqsave(&domain->iommu_lock, flags2);
2919 if (test_and_clear_bit(iommu->seq_id,
2920 &domain->iommu_bmp)) {
2921 domain->iommu_count--;
2922 domain_update_iommu_cap(domain);
2924 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2926 free_devinfo_mem(info);
2927 spin_lock_irqsave(&device_domain_lock, flags1);
2929 spin_unlock_irqrestore(&device_domain_lock, flags1);
2932 /* domain id for virtual machines; it is never programmed into a context entry */
2933 static unsigned long vm_domid;
2935 static int vm_domain_min_agaw(struct dmar_domain *domain)
2938 int min_agaw = domain->agaw;
2940 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
2941 for (; i < g_num_of_iommus; ) {
2942 if (min_agaw > g_iommus[i]->agaw)
2943 min_agaw = g_iommus[i]->agaw;
2945 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
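/*
 * Each AGAW step corresponds to one page-table level, i.e. 9 bits of
 * address width (see agaw_to_width()).  Taking the minimum over every
 * IOMMU set in iommu_bmp therefore yields the widest guest address space
 * that all hardware units backing this domain can still translate.
 */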
2951 static struct dmar_domain *iommu_alloc_vm_domain(void)
2953 struct dmar_domain *domain;
2955 domain = alloc_domain_mem();
2959 domain->id = vm_domid++;
2960 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
2961 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
2966 static int vm_domain_init(struct dmar_domain *domain, int guest_width)
2970 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
2971 spin_lock_init(&domain->mapping_lock);
2972 spin_lock_init(&domain->iommu_lock);
2974 domain_reserve_special_ranges(domain);
2976 /* calculate AGAW */
2977 domain->gaw = guest_width;
2978 adjust_width = guestwidth_to_adjustwidth(guest_width);
2979 domain->agaw = width_to_agaw(adjust_width);
2981 INIT_LIST_HEAD(&domain->devices);
2983 domain->iommu_count = 0;
2984 domain->iommu_coherency = 0;
2985 domain->max_addr = 0;
2987 /* always allocate the top pgd */
2988 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
2991 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
2995 static void iommu_free_vm_domain(struct dmar_domain *domain)
2997 unsigned long flags;
2998 struct dmar_drhd_unit *drhd;
2999 struct intel_iommu *iommu;
3001 unsigned long ndomains;
3003 for_each_drhd_unit(drhd) {
3006 iommu = drhd->iommu;
3008 ndomains = cap_ndoms(iommu->cap);
3009 i = find_first_bit(iommu->domain_ids, ndomains);
3010 for (; i < ndomains; ) {
3011 if (iommu->domains[i] == domain) {
3012 spin_lock_irqsave(&iommu->lock, flags);
3013 clear_bit(i, iommu->domain_ids);
3014 iommu->domains[i] = NULL;
3015 spin_unlock_irqrestore(&iommu->lock, flags);
3018 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3023 static void vm_domain_exit(struct dmar_domain *domain)
3027 	/* Domain 0 is reserved, so don't process it */
3031 vm_domain_remove_all_dev_info(domain);
3033 put_iova_domain(&domain->iovad);
3034 end = DOMAIN_MAX_ADDR(domain->gaw);
3035 end = end & (~VTD_PAGE_MASK);
3038 dma_pte_clear_range(domain, 0, end);
3040 /* free page tables */
3041 dma_pte_free_pagetable(domain, 0, end);
3043 iommu_free_vm_domain(domain);
3044 free_domain_mem(domain);
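/*
 * Teardown order matters here: the device/domain links are removed first
 * so no new mappings can appear, the IOVA allocator is released, the DMA
 * page tables are cleared and freed over the whole guest address width,
 * and only then are the domain-id bits returned to each IOMMU before the
 * dmar_domain itself is freed.
 */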
3047 static int intel_iommu_domain_init(struct iommu_domain *domain)
3049 struct dmar_domain *dmar_domain;
3051 dmar_domain = iommu_alloc_vm_domain();
3054 "intel_iommu_domain_init: dmar_domain == NULL\n");
3057 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3059 "intel_iommu_domain_init() failed\n");
3060 vm_domain_exit(dmar_domain);
3063 domain->priv = dmar_domain;
3068 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3070 struct dmar_domain *dmar_domain = domain->priv;
3072 domain->priv = NULL;
3073 vm_domain_exit(dmar_domain);
3076 static int intel_iommu_attach_device(struct iommu_domain *domain,
3079 struct dmar_domain *dmar_domain = domain->priv;
3080 struct pci_dev *pdev = to_pci_dev(dev);
3081 struct intel_iommu *iommu;
3086 /* normally pdev is not mapped */
3087 if (unlikely(domain_context_mapped(pdev))) {
3088 struct dmar_domain *old_domain;
3090 old_domain = find_domain(pdev);
3092 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
3093 vm_domain_remove_one_dev_info(old_domain, pdev);
3095 domain_remove_dev_info(old_domain);
3099 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
3103 /* check if this iommu agaw is sufficient for max mapped address */
3104 addr_width = agaw_to_width(iommu->agaw);
3105 end = DOMAIN_MAX_ADDR(addr_width);
3106 end = end & VTD_PAGE_MASK;
3107 if (end < dmar_domain->max_addr) {
3108 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3109 "sufficient for the mapped address (%llx)\n",
3110 __func__, iommu->agaw, dmar_domain->max_addr);
3114 ret = domain_context_mapping(dmar_domain, pdev);
3118 ret = vm_domain_add_dev_info(dmar_domain, pdev);
3122 static void intel_iommu_detach_device(struct iommu_domain *domain,
3125 struct dmar_domain *dmar_domain = domain->priv;
3126 struct pci_dev *pdev = to_pci_dev(dev);
3128 vm_domain_remove_one_dev_info(dmar_domain, pdev);
3131 static int intel_iommu_map_range(struct iommu_domain *domain,
3132 unsigned long iova, phys_addr_t hpa,
3133 size_t size, int iommu_prot)
3135 struct dmar_domain *dmar_domain = domain->priv;
3141 if (iommu_prot & IOMMU_READ)
3142 prot |= DMA_PTE_READ;
3143 if (iommu_prot & IOMMU_WRITE)
3144 prot |= DMA_PTE_WRITE;
3145 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3146 prot |= DMA_PTE_SNP;
3148 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
3149 if (dmar_domain->max_addr < max_addr) {
3153 /* check if minimum agaw is sufficient for mapped address */
3154 min_agaw = vm_domain_min_agaw(dmar_domain);
3155 addr_width = agaw_to_width(min_agaw);
3156 end = DOMAIN_MAX_ADDR(addr_width);
3157 end = end & VTD_PAGE_MASK;
3158 if (end < max_addr) {
3159 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3160 "sufficient for the mapped address (%llx)\n",
3161 __func__, min_agaw, max_addr);
3164 dmar_domain->max_addr = max_addr;
3167 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
3171 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3172 unsigned long iova, size_t size)
3174 struct dmar_domain *dmar_domain = domain->priv;
3177 /* The address might not be aligned */
3178 base = iova & VTD_PAGE_MASK;
3179 size = VTD_PAGE_ALIGN(size);
3180 dma_pte_clear_range(dmar_domain, base, base + size);
3182 if (dmar_domain->max_addr == base + size)
3183 dmar_domain->max_addr = base;
3186 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3189 struct dmar_domain *dmar_domain = domain->priv;
3190 struct dma_pte *pte;
3193 pte = addr_to_dma_pte(dmar_domain, iova);
3195 phys = dma_pte_addr(pte);
3200 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3203 struct dmar_domain *dmar_domain = domain->priv;
3205 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3206 return dmar_domain->iommu_snooping;
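/*
 * IOMMU_CAP_CACHE_COHERENCY reports whether this domain can enforce
 * snooping through DMA_PTE_SNP (see the IOMMU_CACHE handling in
 * intel_iommu_map_range() above).  Callers such as KVM typically use it
 * to decide whether guest-controlled cacheability attributes are safe to
 * honour for an assigned device.
 */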
3211 static struct iommu_ops intel_iommu_ops = {
3212 .domain_init = intel_iommu_domain_init,
3213 .domain_destroy = intel_iommu_domain_destroy,
3214 .attach_dev = intel_iommu_attach_device,
3215 .detach_dev = intel_iommu_detach_device,
3216 .map = intel_iommu_map_range,
3217 .unmap = intel_iommu_unmap_range,
3218 .iova_to_phys = intel_iommu_iova_to_phys,
3219 .domain_has_cap = intel_iommu_domain_has_cap,
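/*
 * Illustrative sketch (hypothetical caller): these callbacks are reached
 * through the generic IOMMU API of this era, with KVM device assignment
 * as the typical user.  Error handling is omitted and "assigned_dev",
 * "gpa", "hpa" and "size" are assumed names.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &assigned_dev->dev);
 *	iommu_map_range(dom, gpa, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	phys = iommu_iova_to_phys(dom, gpa);
 *	iommu_unmap_range(dom, gpa, size);
 *	iommu_detach_device(dom, &assigned_dev->dev);
 *	iommu_domain_free(dom);
 */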
3222 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3225 	 * Mobile 4 Series Chipset neglects to set the RWBF capability, but needs it:
3228 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3232 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
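/*
 * rwbf_quirk is consumed by iommu_flush_write_buffer(): with the quirk
 * set, the write buffer is presumably flushed even when the capability
 * register does not advertise RWBF (cap_rwbf() is clear), which is what
 * the affected Mobile 4 Series chipsets require.
 */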