/*
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"
#define MSI_TARGET_CPU		first_cpu(cpu_online_map)

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;
static int last_alloc_vector;
static int nr_released_vectors;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif
static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
	memset(p, 0, sizeof(struct msi_desc));
}

static int msi_cache_init(void)
{
	/* Each cache object holds a single struct msi_desc */
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
				       0, SLAB_HWCACHE_ALIGN,
				       msi_cache_ctor, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}
static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		/* The MSI per-vector mask bit lives in config space */
		int pos;
		u32 mask_bits;

		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~1;
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		/* The MSI-X mask bit lives in the memory-mapped table */
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}
#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msg_address address;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;

		if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
			return;

		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address.lo_address.value);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
			MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address.lo_address.value);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address.lo_address.value = readl(entry->mask_base + offset);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
			MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
		writel(address.lo_address.value, entry->mask_base + offset);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
/*
 * Alias used by the handler tables below; resolves to NULL on UP builds,
 * where there is no affinity to manage.
 */
#define set_msi_irq_affinity	set_msi_affinity
#else
#define set_msi_irq_affinity	NULL
#endif	/* CONFIG_SMP */
static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}

static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}

static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}

static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

static void do_nothing(unsigned int vector)
{
}
/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_irq_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_irq_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_irq_affinity
};
static void msi_data_init(struct msg_data *msi_data,
			  unsigned int vector)
{
	memset(msi_data, 0, sizeof(struct msg_data));
	msi_data->vector = (u8)vector;
	msi_data->delivery_mode = MSI_DELIVERY_MODE;
	msi_data->level = MSI_LEVEL_MODE;
	msi_data->trigger = MSI_TRIGGER_MODE;
}

static void msi_address_init(struct msg_address *msi_address)
{
	unsigned int dest_id;
	unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);

	memset(msi_address, 0, sizeof(struct msg_address));
	msi_address->hi_address = (u32)0;
	dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
	msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
	msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
	msi_address->lo_address.u.dest_id = dest_id;
	msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
}
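/*
 * Worked example (illustrative only, assuming the x86 layout behind the
 * constants above: 0xFEE in address bits 31:20, destination APIC ID in
 * bits 19:12, vector in data bits 7:0): targeting physical APIC ID 3
 * with vector 0x31 produces roughly
 *
 *	address.lo_address.value == 0xfee03000	(plus the dest_mode and
 *						 redirection_hint bits taken
 *						 from MSI_*_MODE in msi.h)
 *	address.hi_address       == 0x00000000
 *	data (low 16 bits)       == 0x0031
 *
 * set_msi_affinity() above retargets a vector by rewriting only the
 * APIC ID field in bits 19:12 of the low address.
 */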
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);

static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that successfully allocated MSI
	 * vectors are assigned uniquely among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI have no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assigning */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}
static int get_new_vector(void)
{
	int vector;

	if ((vector = assign_msi_vector()) > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}
static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	if ((status = msi_cache_init()) < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}
	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}
static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(struct msi_desc));
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}
static void irq_handler_init(int cap_id, int pos, int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_desc[pos].lock, flags);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].handler = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].handler = &msi_irq_w_maskbit_type;
	}
	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}
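/*
 * For reference, the message control word edited by msi_enable()/
 * msi_disable() and msix_enable()/msix_disable() above is laid out as
 * follows (a sketch; the PCI 2.3 MSI and MSI-X definitions are
 * authoritative):
 *
 *	MSI:	bit 0		MSI Enable
 *		bits 3:1	Multiple Message Capable (log2 of vectors)
 *		bits 6:4	Multiple Message Enable
 *		bit 7		64-bit Address Capable
 *		bit 8		Per-Vector Masking Capable
 *	MSI-X:	bits 10:0	Table Size - 1
 *		bit 14		Function Mask
 *		bit 15		MSI-X Enable
 */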
static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* A pre-assigned MSI vector for this device already
		   exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}
void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI vector, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates successful setup
 * of entry zero with the new MSI vector; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	struct msg_address address;
	struct msg_data data;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	if (!(entry = alloc_msi_entry()))
		return -ENOMEM;

	if ((vector = get_msi_vector(dev)) < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	msi_address_init(&address);
	msi_data_init(&data, vector);
	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
	pci_write_config_dword(dev, msi_lower_address_reg(pos),
			address.lo_address.value);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address.hi_address);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), *((u32*)&data));
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), *((u32*)&data));
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}
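/*
 * Sketch of the MSI capability registers programmed above, relative to
 * the capability offset 'pos' (based on the PCI spec; the msi_*_reg()
 * helpers in msi.h encode these offsets):
 *
 *	pos + 0x00		Cap ID / Next pointer / Message Control
 *	pos + 0x04		Message Address (lower 32 bits)
 *	pos + 0x08		Message Address (upper 32 bits, 64-bit only)
 *	pos + 0x08 / 0x0c	Message Data (32-bit / 64-bit devices)
 *	pos + 0x0c / 0x10	Mask Bits (only with per-vector masking)
 */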
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X vectors. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated vectors; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	struct msg_address address;
	struct msg_data data;
	int vector, pos, i, j, nr_entries, temp = 0;
	u32 phys_addr, table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	pci_read_config_dword(dev, msix_table_offset_reg(pos),
		&table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	phys_addr = pci_resource_start (dev, bir);
	phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		if ((vector = get_msi_vector(dev)) < 0)
			break;

		j = entries[i].entry;
		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		msi_address_init(&address);
		msi_data_init(&data, vector);
		entry->msi_attrib.current_cpu =
			((address.lo_address.u.dest_id >>
			MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
		writel(address.lo_address.value,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address.hi_address,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(*(u32*)&data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		/* Not all requested vectors could be set up: roll back */
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}
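/*
 * Each MSI-X table entry written above is PCI_MSIX_ENTRY_SIZE (16) bytes,
 * laid out per the PCI MSI-X definition (a sketch; the *_OFFSET constants
 * in msi.h mirror this):
 *
 *	entry + 0x0	Message Address (lower 32 bits)
 *	entry + 0x4	Message Address (upper 32 bits)
 *	entry + 0x8	Message Data
 *	entry + 0xc	Vector Control (bit 0 masks the entry)
 */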
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI vector when its software driver requests MSI mode to be enabled on
 * the hardware device function. A return of zero indicates successful
 * setup of entry zero with the new MSI vector; a non-zero return indicates
 * failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status = -EINVAL;
	u16 control;

	if (!pci_msi_enable || !dev)
		return status;

	temp = dev->irq;	/* Save the pin-based IRQ */

	if ((status = msi_init()) < 0)
		return status;

	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSI_FLAGS_ENABLE)
		return 0;	/* Already in MSI mode */

	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Lookup succeeded: reuse the previously assigned vector */
		unsigned long flags;

		spin_lock_irqsave(&msi_lock, flags);
		if (!vector_irq[dev->irq]) {
			msi_desc[dev->irq]->msi_attrib.state = 0;
			/* Reclaim the vector released by a hotplug remove */
			vector_irq[dev->irq] = -1;
			nr_released_vectors--;
			spin_unlock_irqrestore(&msi_lock, flags);
			enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
			return 0;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		dev->irq = temp;
	}
	/* Check whether the driver already requested MSI-X vectors */
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
		       "Device already has MSI-X vectors assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
		return;
	}
	vector_irq[dev->irq] = 0;	/* free it */
	nr_released_vectors++;
	default_vector = entry->msi_attrib.default_vector;
	spin_unlock_irqrestore(&msi_lock, flags);
	/* Restore dev->irq to its default pin-assertion vector */
	dev->irq = default_vector;
	disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
			 PCI_CAP_ID_MSI);
}
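/*
 * Typical driver-side usage of the two calls above (an illustrative
 * sketch only; the "foo" names are hypothetical and not part of this
 * file). If pci_enable_msi() fails, dev->irq still carries the pin-based
 * IRQ and the driver simply falls back to INTx:
 *
 *	if (pci_enable_msi(pdev) == 0) {
 *		err = request_irq(pdev->irq, foo_interrupt, 0, "foo", foo);
 *		if (err)
 *			pci_disable_msi(pdev);
 *	}
 *
 * Teardown must free the IRQ before disabling MSI, otherwise
 * pci_disable_msi() warns and trips its BUG_ON():
 *
 *	free_irq(pdev->irq, foo);
 *	pci_disable_msi(pdev);
 */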
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == vector) {
			/*
			 * Detect last MSI-X vector to be released.
			 * Release the MSI-X memory-mapped table.
			 */
			int pos, nr_entries;
			u32 phys_addr, table_offset;
			u16 control;
			u8 bir;

			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			nr_entries = multi_msix_capable(control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			phys_addr = pci_resource_start (dev, bir);
			phys_addr += (u32)(table_offset &
				~PCI_MSIX_FLAGS_BIRMASK);
			iounmap(base);
		}
	}

	return 0;
}
static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
	int vector = head, tail = 0;
	int i, j = 0, nr_entries = 0;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	while (head != tail) {
		nr_entries++;
		tail = msi_desc[vector]->link.tail;
		if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
			j = vector;
		vector = tail;
	}
	if (*nvec > nr_entries) {
		spin_unlock_irqrestore(&msi_lock, flags);
		*nvec = nr_entries;
		return -EINVAL;
	}
	vector = ((j > 0) ? j : head);
	for (i = 0; i < *nvec; i++) {
		j = msi_desc[vector]->msi_attrib.entry_nr;
		msi_desc[vector]->msi_attrib.state = 0;	/* Mark it not active */
		vector_irq[vector] = -1;		/* Mark it busy */
		nr_released_vectors--;
		entries[i].vector = vector;
		if (j != (entries + i)->entry) {
			base = msi_desc[vector]->mask_base;
			msi_desc[vector]->msi_attrib.entry_nr =
				(entries + i)->entry;
			writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
			writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
			writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
				base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET);
		}
		vector = msi_desc[vector]->link.tail;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested vectors when its software driver requests MSI-X mode
 * to be enabled on the hardware device function. A return of zero indicates
 * successful configuration of the MSI-X capability structure with newly
 * allocated MSI-X vectors. A return of < 0 indicates a failure, while a
 * return of > 0 indicates that the driver requested more vectors than are
 * available; the driver should re-send its request using the returned
 * value as the new vector count.
 **/
917 int status, pos, nr_entries, free_vectors;
922 if (!pci_msi_enable || !dev || !entries)
925 if ((status = msi_init()) < 0)
928 if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
931 pci_read_config_word(dev, msi_control_reg(pos), &control);
932 if (control & PCI_MSIX_FLAGS_ENABLE)
933 return -EINVAL; /* Already in MSI-X mode */
935 nr_entries = multi_msix_capable(control);
936 if (nvec > nr_entries)
939 /* Check for any invalid entries */
940 for (i = 0; i < nvec; i++) {
941 if (entries[i].entry >= nr_entries)
942 return -EINVAL; /* invalid entry */
943 for (j = i + 1; j < nvec; j++) {
944 if (entries[i].entry == entries[j].entry)
945 return -EINVAL; /* duplicate entry */
949 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
952 /* Reroute MSI-X table */
953 if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
954 /* #requested > #previous-assigned */
959 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
962 /* Check whether driver already requested for MSI vector */
963 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
964 !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
965 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
966 "Device already has an MSI vector assigned\n",
972 spin_lock_irqsave(&msi_lock, flags);
974 * msi_lock is provided to ensure that enough vectors resources are
975 * available before granting.
977 free_vectors = pci_vector_resources(last_alloc_vector,
978 nr_released_vectors);
979 /* Ensure that each MSI/MSI-X device has one vector reserved by
980 default to avoid any MSI-X driver to take all available
982 free_vectors -= nr_reserved_vectors;
983 /* Find the average of free vectors among MSI-X devices */
984 if (nr_msix_devices > 0)
985 free_vectors /= nr_msix_devices;
986 spin_unlock_irqrestore(&msi_lock, flags);
988 if (nvec > free_vectors) {
989 if (free_vectors > 0)
995 status = msix_capability_init(dev, entries, nvec);
996 if (!status && nr_msix_devices > 0)
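/*
 * Driver-side sketch of the vector-count negotiation described in the
 * kernel-doc above (illustrative only; names other than pci_enable_msix(),
 * struct msix_entry and request_irq() are hypothetical). A positive
 * return value is the number of vectors the driver may retry with:
 *
 *	struct msix_entry entries[FOO_NVEC];
 *	int i, err, nvec = FOO_NVEC;
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *
 *	err = pci_enable_msix(pdev, entries, nvec);
 *	if (err > 0) {
 *		nvec = err;
 *		err = pci_enable_msix(pdev, entries, nvec);
 *	}
 *	if (err == 0)
 *		for (i = 0; i < nvec; i++)
 *			err = request_irq(entries[i].vector, foo_msix_isr,
 *					  0, "foo", foo);
 */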
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		while (head != tail) {
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0;	/* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;	/* Restore the pin-based IRQ */
		disable_msi_mode(dev,
			pci_find_capability(dev, PCI_CAP_ID_MSIX),
			PCI_CAP_ID_MSIX);
	}
}
/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during a hot-plug remove operation, when the device function is
 * being hot-removed. Any MSI/MSI-X vectors previously assigned to this
 * device function are reclaimed to the unused state so they may be reused
 * later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Force release of the MSI-X memory-mapped table */
			u32 phys_addr, table_offset;
			u16 control;
			u8 bir;

			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			phys_addr = pci_resource_start (dev, bir);
			phys_addr += (u32)(table_offset &
				~PCI_MSIX_FLAGS_BIRMASK);
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}
EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);