/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

#define PREFIX "DMAR:"
/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
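
/*
 * Resolve a single ACPI device-scope entry to a struct pci_dev.  Starting
 * from the entry's start bus number, each acpi_dmar_pci_path element is one
 * (device, function) hop; intermediate hops must be bridges, and the walk
 * continues on their secondary bus until the final device is reached.
 */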
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
			PREFIX "Device scope bus [%d] not found\n",

		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);

		bus = pdev->subordinate;

		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);

	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
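
/*
 * Build the pci_dev array for one device-scope list.  The list is walked
 * twice: the first pass only counts the ENDPOINT and BRIDGE entries so a
 * suitably sized array can be allocated with kcalloc(), and the second pass
 * resolves each counted entry through dmar_parse_one_dev_scope().
 */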
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
	struct acpi_dmar_device_scope *scope;

	while (start < end) {
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);

	while (start < end) {
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
					&(*devices)[index], segment);
		start += scope->length;
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
dmar_parse_one_drhd(struct acpi_dmar_header *header)
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);

	dmar_register_drhd_unit(dmaru);
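
/*
 * Device scopes cannot be resolved when the DRHD structure is first parsed,
 * so dmar_parse_dev() runs later, from dmar_dev_scope_init(), once the PCI
 * tree is available.  INCLUDE_ALL units carry no scope list, and a unit
 * whose scope cannot be resolved is removed from dmar_drhd_units.
 */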
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
	struct acpi_dmar_hardware_unit *drhd;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
		list_del(&dmaru->list);

LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
	list_add(&rmrr->list, &dmar_rmrr_units);
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);

	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);

rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
	struct acpi_dmar_reserved_memory *rmrr;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;
		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
/**
 * dmar_table_detect - checks whether the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
/**
 * parse_dmar_table - parses the DMA remapping reporting (DMAR) table
 */
parse_dmar_table(void)
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been done
	 * with a fixed map.
	 */

	dmar = (struct acpi_table_dmar *)dmar_tbl;
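
	/*
	 * The DMAR table reports the host address width as N - 1, so any
	 * value below PAGE_SHIFT - 1 cannot even cover a single page and
	 * indicates a broken table.
	 */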
	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");

	printk(KERN_INFO PREFIX "Host address width %d\n",

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */

		entry_header = ((void *)entry_header + entry_header->length);
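
/*
 * dmar_pci_device_match - a device is covered by a device-scope list if the
 * device itself or any PCI bridge above it appears in the devices[] array;
 * the walk follows dev->bus->self up toward the root.
 */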
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,

		for (index = 0; index < cnt; index++)
			if (dev == devices[index])

		/* Check our parent */
		dev = dev->bus->self;

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
int __init dmar_dev_scope_init(void)
	struct dmar_drhd_unit *drhd, *drhd_n;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);

	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);

int __init dmar_table_init(void)
	static int dmar_table_initialized;

	if (dmar_table_initialized)

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
		printk(KERN_INFO PREFIX "Failed to parse the DMAR table\n");

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
void __init detect_intel_iommu(void)

	ret = dmar_table_detect();

#ifdef CONFIG_INTR_REMAP
	struct acpi_table_dmar *dmar;
	/*
	 * For now we will disable DMA remapping when interrupt remapping
	 * is enabled.  When support for queued invalidation for IOTLB
	 * invalidation is added, we will not need this any more.
	 */
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			"Queued invalidation will be enabled to support "
			"x2apic and Intr-remapping.\n");

	if (ret && !no_iommu && !iommu_detected && !swiotlb &&

	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
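
/*
 * alloc_iommu() maps the first VTD_PAGE_SIZE bytes of the unit's register
 * space, reads the capability and extended-capability registers, and then
 * remaps with a larger size if the fault-recording or IOTLB registers lie
 * beyond the first page.
 */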
int alloc_iommu(struct dmar_drhd_unit *drhd)
	struct intel_iommu *iommu;

	static int iommu_allocated = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
		printk(KERN_ERR "IOMMU: can't map the region\n");

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	agaw = iommu_calculate_agaw(iommu);
			"Cannot get a valid agaw for iommu (seq_id = %d)\n",

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
			printk(KERN_ERR "IOMMU: can't map the region\n");

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		 (unsigned long long)drhd->reg_base_addr,
		 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		 (unsigned long long)iommu->cap,
		 (unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);
void free_iommu(struct intel_iommu *iommu)

	free_dmar_iommu(iommu);

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
static int qi_check_fault(struct intel_iommu *iommu, int index)
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error.  No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> 4) == index) {
			memcpy(&qi->desc[index], &qi->desc[wait_index],
				sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
				sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
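			/*
			 * The bad descriptor at 'index' has been replaced
			 * with a harmless wait descriptor and the IQE bit
			 * cleared (write-one-to-clear), so hardware can
			 * refetch from the same slot and the queue keeps
			 * making progress.
			 */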
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
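
	/*
	 * Each submission is paired with an Invalidation Wait Descriptor
	 * whose Status-Write attribute makes the hardware write QI_DONE
	 * into qi->desc_status[wait_index] (its physical address sits in
	 * the high qword) when the wait descriptor is processed, so
	 * completion can be detected by polling ordinary memory.
	 */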
	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
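	/* The queue tail is a byte offset; descriptors are 16 bytes, hence the << 4. */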
	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu.  This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the
		 * queue.
		 */
		rc = qi_check_fault(iommu, index);

		spin_unlock(&qi->q_lock);
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)

	desc.low = QI_IEC_TYPE;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;

	return qi_submit_sync(&desc, iommu);

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))

	if (cap_write_drain(iommu->cap))

	if (cap_read_drain(iommu->cap))

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
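
	/*
	 * size_order goes into the address-mask (AM) field, so one IOTLB
	 * descriptor invalidates 2^size_order pages starting at addr when a
	 * page-selective granularity is used.
	 */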
	return qi_submit_sync(&desc, iommu);

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))

	iommu->gcmd &= ~DMA_GCMD_QIE;

	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
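	/*
	 * DMAR_IQA_REG takes the physical base address of the descriptor
	 * queue; qi->desc is a single zeroed page allocated in
	 * dmar_enable_qi().
	 */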
	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);

/*
 * Enable Queued Invalidation interface.  This is required to support
 * interrupt-remapping.  Also used by DMA-remapping, where it replaces
 * register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)

	if (!ecap_qis(iommu->ecap))

	/*
	 * queued invalidation is already set up and enabled.
	 */

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);
/* iommu interrupt handling. Most of it is MSI-like. */

static const char *dma_remap_fault_reasons[] =
{
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed to by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)
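
/*
 * Fault reason codes below ARRAY_SIZE(dma_remap_fault_reasons) index the
 * DMA remapping strings; codes from 0x20 upward index
 * intr_remap_fault_reasons; anything else is reported as unknown.
 */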
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
		*fault_type = UNKNOWN;
void dmar_msi_unmask(unsigned int irq)
	struct intel_iommu *iommu = get_irq_data(irq);

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);

void dmar_msi_mask(unsigned int irq)
	struct intel_iommu *iommu = get_irq_data(irq);

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the posted write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);

void dmar_msi_write(int irq, struct msi_msg *msg)
	struct intel_iommu *iommu = get_irq_data(irq);

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);

void dmar_msi_read(int irq, struct msi_msg *msg)
	struct intel_iommu *iommu = get_irq_data(irq);

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
			"INTR-REMAP:[fault reason %02d] %s\n",
			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
			"DMAR:[%s] Request device [%02x:%02x.%d] "
			"DMAR:[fault reason %02d] %s\n",
			(type ? "DMA Read" : "DMA Write"),
			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);

#define PRIMARY_FAULT_REG_LEN (16)
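/*
 * Each primary fault recording register is 16 bytes: the low 64 bits hold
 * the faulting page address, the dword at offset 8 carries the source-id,
 * and the dword at offset 12 carries the fault reason, the request type and
 * the F (fault) bit.
 */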
irqreturn_t dmar_fault(int irq, void *dev_id)
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		if (fault_index >= cap_num_fault_regs(iommu->cap))
		spin_lock_irqsave(&iommu->register_lock, flag);

	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
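	/*
	 * The latched bits in the fault status register are
	 * write-one-to-clear, so writing back the value just read clears
	 * any error conditions that were still set.
	 */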
	spin_unlock_irqrestore(&iommu->register_lock, flag);

int dmar_set_interrupt(struct intel_iommu *iommu)

	/*
	 * Check if the fault interrupt is already initialized.
	 */

		printk(KERN_ERR "IOMMU: no free vectors\n");

	set_irq_data(irq, iommu);

	ret = arch_setup_dmar_msi(irq);
		set_irq_data(irq, NULL);

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
		printk(KERN_ERR "IOMMU: can't request irq\n");

int __init enable_drhd_fault_handling(void)
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
				"interrupt, ret %d\n",
				(unsigned long long)drhd->reg_base_addr, ret);

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
	if (!ecap_qis(iommu->ecap))

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again.  Since there are no
	 * pending invalidation requests now, it is safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);