/*
 * File:        msi.c
 * Purpose:     PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

#define MSI_TARGET_CPU          first_cpu(cpu_online_map)

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;
static int last_alloc_vector;
static int nr_released_vectors;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif

static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
        memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
}

static int msi_cache_init(void)
{
        msi_cachep = kmem_cache_create("msi_cache",
                        NR_IRQS * sizeof(struct msi_desc),
                        0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
        if (!msi_cachep)
                return -ENOMEM;

        return 0;
}

static void msi_set_mask_bit(unsigned int vector, int flag)
{
        struct msi_desc *entry;

        entry = (struct msi_desc *)msi_desc[vector];
        if (!entry || !entry->dev || !entry->mask_base)
                return;
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
                int             pos;
                u32             mask_bits;

                pos = (long)entry->mask_base;
                pci_read_config_dword(entry->dev, pos, &mask_bits);
                mask_bits &= ~(1);
                mask_bits |= flag;
                pci_write_config_dword(entry->dev, pos, mask_bits);
                break;
        }
        case PCI_CAP_ID_MSIX:
        {
                int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
                writel(flag, entry->mask_base + offset);
                break;
        }
        default:
                break;
        }
}

#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
        struct msi_desc *entry;
        struct msg_address address;
        unsigned int irq = vector;
        unsigned int dest_cpu = first_cpu(cpu_mask);

        entry = (struct msi_desc *)msi_desc[vector];
        if (!entry || !entry->dev)
                return;

        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
                int pos;

                if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
                        return;

                pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
                        &address.lo_address.value);
                address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
                address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
                                                        MSI_TARGET_CPU_SHIFT);
                entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
                pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
                        address.lo_address.value);
                set_native_irq_info(irq, cpu_mask);
                break;
        }
        case PCI_CAP_ID_MSIX:
        {
                int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

                address.lo_address.value = readl(entry->mask_base + offset);
                address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
                address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
                                                        MSI_TARGET_CPU_SHIFT);
                entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
                writel(address.lo_address.value, entry->mask_base + offset);
                set_native_irq_info(irq, cpu_mask);
                break;
        }
        default:
                break;
        }
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */

static void mask_MSI_irq(unsigned int vector)
{
        msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
        msi_set_mask_bit(vector, 0);
}

static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
        struct msi_desc *entry;
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        entry = msi_desc[vector];
        if (!entry || !entry->dev) {
                spin_unlock_irqrestore(&msi_lock, flags);
                return 0;
        }
        entry->msi_attrib.state = 1;    /* Mark it active */
        spin_unlock_irqrestore(&msi_lock, flags);

        return 0;       /* never anything pending */
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
        startup_msi_irq_wo_maskbit(vector);
        unmask_MSI_irq(vector);
        return 0;       /* never anything pending */
}

static void shutdown_msi_irq(unsigned int vector)
{
        struct msi_desc *entry;
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        entry = msi_desc[vector];
        if (entry && entry->dev)
                entry->msi_attrib.state = 0;    /* Mark it not active */
        spin_unlock_irqrestore(&msi_lock, flags);
}

static void end_msi_irq_wo_maskbit(unsigned int vector)
{
        move_native_irq(vector);
        ack_APIC_irq();
}

static void end_msi_irq_w_maskbit(unsigned int vector)
{
        move_native_irq(vector);
        unmask_MSI_irq(vector);
        ack_APIC_irq();
}

static void do_nothing(unsigned int vector)
{
}

/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
        .typename       = "PCI-MSI-X",
        .startup        = startup_msi_irq_w_maskbit,
        .shutdown       = shutdown_msi_irq,
        .enable         = unmask_MSI_irq,
        .disable        = mask_MSI_irq,
        .ack            = mask_MSI_irq,
        .end            = end_msi_irq_w_maskbit,
        .set_affinity   = set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
        .typename       = "PCI-MSI",
        .startup        = startup_msi_irq_w_maskbit,
        .shutdown       = shutdown_msi_irq,
        .enable         = unmask_MSI_irq,
        .disable        = mask_MSI_irq,
        .ack            = mask_MSI_irq,
        .end            = end_msi_irq_w_maskbit,
        .set_affinity   = set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
        .typename       = "PCI-MSI",
        .startup        = startup_msi_irq_wo_maskbit,
        .shutdown       = shutdown_msi_irq,
        .enable         = do_nothing,
        .disable        = do_nothing,
        .ack            = do_nothing,
        .end            = end_msi_irq_wo_maskbit,
        .set_affinity   = set_msi_affinity
};

static void msi_data_init(struct msg_data *msi_data,
                          unsigned int vector)
{
        memset(msi_data, 0, sizeof(struct msg_data));
        msi_data->vector = (u8)vector;
        msi_data->delivery_mode = MSI_DELIVERY_MODE;
        msi_data->level = MSI_LEVEL_MODE;
        msi_data->trigger = MSI_TRIGGER_MODE;
}

static void msi_address_init(struct msg_address *msi_address)
{
        unsigned int    dest_id;
        unsigned long   dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);

        memset(msi_address, 0, sizeof(struct msg_address));
        msi_address->hi_address = (u32)0;
        dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
        msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
        msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
        msi_address->lo_address.u.dest_id = dest_id;
        msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
        static int new_vector_avail = 1;
        int vector;
        unsigned long flags;

        /*
         * msi_lock ensures that each successfully allocated MSI vector is
         * assigned uniquely among drivers.
         */
        spin_lock_irqsave(&msi_lock, flags);

        if (!new_vector_avail) {
                int free_vector = 0;

                /*
                 * vector_irq[] = -1 indicates that this specific vector is:
                 * - assigned for MSI (since MSI has no associated IRQ) or
                 * - assigned for legacy if less than 16, or
                 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
                 * vector_irq[] = 0 indicates that this vector, previously
                 * assigned for MSI, was freed by a hotplug remove operation.
                 * The vector will be reused by a subsequent hotplug add
                 * operation.
                 * vector_irq[] > 0 indicates that this vector is assigned for
                 * IOxAPIC IRQs. This vector and its value provide a 1-to-1
                 * vector-to-IOxAPIC IRQ mapping.
                 */
                for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
                        if (vector_irq[vector] != 0)
                                continue;
                        free_vector = vector;
                        if (!msi_desc[vector])
                                break;
                        else
                                continue;
                }
                if (!free_vector) {
                        spin_unlock_irqrestore(&msi_lock, flags);
                        return -EBUSY;
                }
                vector_irq[free_vector] = -1;
                nr_released_vectors--;
                spin_unlock_irqrestore(&msi_lock, flags);
                if (msi_desc[free_vector] != NULL) {
                        struct pci_dev *dev;
                        int tail;

                        /* free all linked vectors before re-assign */
                        do {
                                spin_lock_irqsave(&msi_lock, flags);
                                dev = msi_desc[free_vector]->dev;
                                tail = msi_desc[free_vector]->link.tail;
                                spin_unlock_irqrestore(&msi_lock, flags);
                                msi_free_vector(dev, tail, 1);
                        } while (free_vector != tail);
                }

                return free_vector;
        }
        vector = assign_irq_vector(AUTO_ASSIGN);
        last_alloc_vector = vector;
        if (vector == LAST_DEVICE_VECTOR)
                new_vector_avail = 0;

        spin_unlock_irqrestore(&msi_lock, flags);
        return vector;
}

static int get_new_vector(void)
{
        int vector;

        if ((vector = assign_msi_vector()) > 0)
                set_intr_gate(vector, interrupt[vector]);

        return vector;
}

static int msi_init(void)
{
        static int status = -ENOMEM;

        if (!status)
                return status;

        if (pci_msi_quirk) {
                pci_msi_enable = 0;
                printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
                status = -EINVAL;
                return status;
        }

        if ((status = msi_cache_init()) < 0) {
                pci_msi_enable = 0;
                printk(KERN_WARNING "PCI: MSI cache init failed\n");
                return status;
        }
        last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
        if (last_alloc_vector < 0) {
                pci_msi_enable = 0;
                printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
                status = -EBUSY;
                return status;
        }
        vector_irq[last_alloc_vector] = 0;
        nr_released_vectors++;

        return status;
}

static int get_msi_vector(struct pci_dev *dev)
{
        return get_new_vector();
}

static struct msi_desc* alloc_msi_entry(void)
{
        struct msi_desc *entry;

        entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
        if (!entry)
                return NULL;

        memset(entry, 0, sizeof(struct msi_desc));
        entry->link.tail = entry->link.head = 0;        /* single message */
        entry->dev = NULL;

        return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int vector)
{
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        msi_desc[vector] = entry;
        spin_unlock_irqrestore(&msi_lock, flags);
}

static void irq_handler_init(int cap_id, int pos, int mask)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_desc[pos].lock, flags);
        if (cap_id == PCI_CAP_ID_MSIX)
                irq_desc[pos].handler = &msix_irq_type;
        else {
                if (!mask)
                        irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
                else
                        irq_desc[pos].handler = &msi_irq_w_maskbit_type;
        }
        spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
        u16 control;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (type == PCI_CAP_ID_MSI) {
                /* Set enabled bits to single MSI & enable MSI_enable bit */
                msi_enable(control, 1);
                pci_write_config_word(dev, msi_control_reg(pos), control);
        } else {
                msix_enable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
        }
        if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
                /* PCI Express Endpoint device detected */
                pci_intx(dev, 0);  /* disable intx */
        }
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
        u16 control;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (type == PCI_CAP_ID_MSI) {
                /* Clear the MSI_enable bit to disable MSI */
                msi_disable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
        } else {
                msix_disable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
        }
        if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
                /* PCI Express Endpoint device detected */
                pci_intx(dev, 1);  /* enable intx */
        }
}

static int msi_lookup_vector(struct pci_dev *dev, int type)
{
        int vector;
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
                if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
                        msi_desc[vector]->msi_attrib.type != type ||
                        msi_desc[vector]->msi_attrib.default_vector != dev->irq)
                        continue;
                spin_unlock_irqrestore(&msi_lock, flags);
                /* A pre-assigned MSI vector for this device
                   already exists. Override dev->irq with this vector */
                dev->irq = vector;
                return 0;
        }
        spin_unlock_irqrestore(&msi_lock, flags);

        return -EACCES;
}

void pci_scan_msi_device(struct pci_dev *dev)
{
        if (!dev)
                return;

        if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
                nr_msix_devices++;
        else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
                nr_reserved_vectors++;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI vector, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates successful setup
 * of entry zero with the new MSI vector; a non-zero return indicates
 * failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
        struct msi_desc *entry;
        struct msg_address address;
        struct msg_data data;
        int pos, vector;
        u16 control;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        /* MSI Entry Initialization */
        if (!(entry = alloc_msi_entry()))
                return -ENOMEM;

        if ((vector = get_msi_vector(dev)) < 0) {
                kmem_cache_free(msi_cachep, entry);
                return -EBUSY;
        }
        entry->link.head = vector;
        entry->link.tail = vector;
        entry->msi_attrib.type = PCI_CAP_ID_MSI;
        entry->msi_attrib.state = 0;                    /* Mark it not active */
        entry->msi_attrib.entry_nr = 0;
        entry->msi_attrib.maskbit = is_mask_bit_support(control);
        entry->msi_attrib.default_vector = dev->irq;    /* Save IOAPIC IRQ */
        dev->irq = vector;
        entry->dev = dev;
        if (is_mask_bit_support(control)) {
                entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
                                is_64bit_address(control));
        }
        /* Replace with MSI handler */
        irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
        /* Configure MSI capability structure */
        msi_address_init(&address);
        msi_data_init(&data, vector);
        entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
                                MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
        pci_write_config_dword(dev, msi_lower_address_reg(pos),
                        address.lo_address.value);
        if (is_64bit_address(control)) {
                pci_write_config_dword(dev,
                        msi_upper_address_reg(pos), address.hi_address);
                pci_write_config_word(dev,
                        msi_data_reg(pos, 1), *((u32*)&data));
        } else
                pci_write_config_word(dev,
                        msi_data_reg(pos, 0), *((u32*)&data));
        if (entry->msi_attrib.maskbit) {
                unsigned int maskbits, temp;
                /* All MSIs are unmasked by default, Mask them all */
                pci_read_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        &maskbits);
                temp = (1 << multi_msi_capable(control));
                temp = ((temp - 1) & ~temp);
                maskbits |= temp;
                pci_write_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        maskbits);
        }
        attach_msi_entry(entry, vector);
        /* Set MSI enabled bits  */
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

        return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X vectors. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated vectors; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
                                struct msix_entry *entries, int nvec)
{
        struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
        struct msg_address address;
        struct msg_data data;
        int vector, pos, i, j, nr_entries, temp = 0;
        u32 phys_addr, table_offset;
        u16 control;
        u8 bir;
        void __iomem *base;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        /* Request & Map MSI-X table region */
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        nr_entries = multi_msix_capable(control);
        pci_read_config_dword(dev, msix_table_offset_reg(pos),
                &table_offset);
        bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
        phys_addr = pci_resource_start (dev, bir);
        phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
        base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
        if (base == NULL)
                return -ENOMEM;

        /* MSI-X Table Initialization */
        for (i = 0; i < nvec; i++) {
                entry = alloc_msi_entry();
                if (!entry)
                        break;
                if ((vector = get_msi_vector(dev)) < 0)
                        break;

                j = entries[i].entry;
                entries[i].vector = vector;
                entry->msi_attrib.type = PCI_CAP_ID_MSIX;
                entry->msi_attrib.state = 0;            /* Mark it not active */
                entry->msi_attrib.entry_nr = j;
                entry->msi_attrib.maskbit = 1;
                entry->msi_attrib.default_vector = dev->irq;
                entry->dev = dev;
                entry->mask_base = base;
                if (!head) {
                        entry->link.head = vector;
                        entry->link.tail = vector;
                        head = entry;
                } else {
                        entry->link.head = temp;
                        entry->link.tail = tail->link.tail;
                        tail->link.tail = vector;
                        head->link.head = vector;
                }
                temp = vector;
                tail = entry;
                /* Replace with MSI-X handler */
                irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
                /* Configure MSI-X capability structure */
                msi_address_init(&address);
                msi_data_init(&data, vector);
                entry->msi_attrib.current_cpu =
                        ((address.lo_address.u.dest_id >>
                        MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
                writel(address.lo_address.value,
                        base + j * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
                writel(address.hi_address,
                        base + j * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
                writel(*(u32*)&data,
                        base + j * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_DATA_OFFSET);
                attach_msi_entry(entry, vector);
        }
        if (i != nvec) {
                i--;
                for (; i >= 0; i--) {
                        vector = (entries + i)->vector;
                        msi_free_vector(dev, vector, 0);
                        (entries + i)->vector = 0;
                }
                return -EBUSY;
        }
        /* Set MSI-X enabled bits */
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

        return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with
 * a single MSI vector when its software driver requests MSI mode
 * on the hardware device function. A return of zero indicates
 * successful setup of entry zero with the new MSI vector; a non-zero
 * return indicates failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
        int pos, temp, status = -EINVAL;
        u16 control;

        if (!pci_msi_enable || !dev)
                return status;

        if (dev->no_msi)
                return status;

        temp = dev->irq;

        if ((status = msi_init()) < 0)
                return status;

        if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
                return -EINVAL;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (control & PCI_MSI_FLAGS_ENABLE)
                return 0;                       /* Already in MSI mode */

        if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
                /* Lookup Success */
                unsigned long flags;

                spin_lock_irqsave(&msi_lock, flags);
                if (!vector_irq[dev->irq]) {
                        msi_desc[dev->irq]->msi_attrib.state = 0;
                        vector_irq[dev->irq] = -1;
                        nr_released_vectors--;
                        spin_unlock_irqrestore(&msi_lock, flags);
                        enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
                        return 0;
                }
                spin_unlock_irqrestore(&msi_lock, flags);
                dev->irq = temp;
        }
        /* Check whether driver already requested for MSI-X vectors */
        if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
                !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
                        printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
                               "Device already has MSI-X vectors assigned\n",
                               pci_name(dev));
                        dev->irq = temp;
                        return -EINVAL;
        }
        status = msi_capability_init(dev);
        if (!status) {
                if (!pos)
                        nr_reserved_vectors--;  /* Only MSI capable */
                else if (nr_msix_devices > 0)
                        nr_msix_devices--;      /* Both MSI and MSI-X capable,
                                                   but choose enabling MSI */
        }

        return status;
}
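
/*
 * Illustrative usage sketch only (not part of this driver); the names
 * my_handler, my_probe and "mydrv" are hypothetical. A driver of this
 * kernel era enables MSI before requesting the vector and tears down in
 * the reverse order:
 *
 *      static irqreturn_t my_handler(int irq, void *dev_id,
 *                                    struct pt_regs *regs)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      static int my_probe(struct pci_dev *dev)
 *      {
 *              pci_enable_msi(dev);    [on failure, dev->irq still holds
 *                                       the pin-based IOAPIC vector]
 *              return request_irq(dev->irq, my_handler, 0, "mydrv", dev);
 *      }
 *
 * Teardown runs in reverse: free_irq(dev->irq, dev) first, then
 * pci_disable_msi(dev), which restores dev->irq to the original
 * pin-assertion vector.
 */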

void pci_disable_msi(struct pci_dev* dev)
{
        struct msi_desc *entry;
        int pos, default_vector;
        u16 control;
        unsigned long flags;

        if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
                return;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSI_FLAGS_ENABLE))
                return;

        spin_lock_irqsave(&msi_lock, flags);
        entry = msi_desc[dev->irq];
        if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
                spin_unlock_irqrestore(&msi_lock, flags);
                return;
        }
        if (entry->msi_attrib.state) {
                spin_unlock_irqrestore(&msi_lock, flags);
                printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
                       "free_irq() on MSI vector %d\n",
                       pci_name(dev), dev->irq);
                BUG_ON(entry->msi_attrib.state > 0);
        } else {
                vector_irq[dev->irq] = 0; /* free it */
                nr_released_vectors++;
                default_vector = entry->msi_attrib.default_vector;
                spin_unlock_irqrestore(&msi_lock, flags);
                /* Restore dev->irq to its default pin-assertion vector */
                dev->irq = default_vector;
                disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
                                        PCI_CAP_ID_MSI);
        }
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
        struct msi_desc *entry;
        int head, entry_nr, type;
        void __iomem *base;
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        entry = msi_desc[vector];
        if (!entry || entry->dev != dev) {
                spin_unlock_irqrestore(&msi_lock, flags);
                return -EINVAL;
        }
        type = entry->msi_attrib.type;
        entry_nr = entry->msi_attrib.entry_nr;
        head = entry->link.head;
        base = entry->mask_base;
        msi_desc[entry->link.head]->link.tail = entry->link.tail;
        msi_desc[entry->link.tail]->link.head = entry->link.head;
        entry->dev = NULL;
        if (!reassign) {
                vector_irq[vector] = 0;
                nr_released_vectors++;
        }
        msi_desc[vector] = NULL;
        spin_unlock_irqrestore(&msi_lock, flags);

        kmem_cache_free(msi_cachep, entry);

        if (type == PCI_CAP_ID_MSIX) {
                if (!reassign)
                        writel(1, base +
                                entry_nr * PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

                if (head == vector) {
                        /*
                         * Detect last MSI-X vector to be released.
                         * Release the MSI-X memory-mapped table.
                         */
                        int pos, nr_entries;
                        u32 phys_addr, table_offset;
                        u16 control;
                        u8 bir;

                        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
                        pci_read_config_word(dev, msi_control_reg(pos),
                                &control);
                        nr_entries = multi_msix_capable(control);
                        pci_read_config_dword(dev, msix_table_offset_reg(pos),
                                &table_offset);
                        bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
                        phys_addr = pci_resource_start (dev, bir);
                        phys_addr += (u32)(table_offset &
                                ~PCI_MSIX_FLAGS_BIRMASK);
                        iounmap(base);
                }
        }

        return 0;
}

static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
        int vector = head, tail = 0;
        int i, j = 0, nr_entries = 0;
        void __iomem *base;
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        while (head != tail) {
                nr_entries++;
                tail = msi_desc[vector]->link.tail;
                if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
                        j = vector;
                vector = tail;
        }
        if (*nvec > nr_entries) {
                spin_unlock_irqrestore(&msi_lock, flags);
                *nvec = nr_entries;
                return -EINVAL;
        }
        vector = ((j > 0) ? j : head);
        for (i = 0; i < *nvec; i++) {
                j = msi_desc[vector]->msi_attrib.entry_nr;
                msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
                vector_irq[vector] = -1;                /* Mark it busy */
                nr_released_vectors--;
                entries[i].vector = vector;
                if (j != (entries + i)->entry) {
                        base = msi_desc[vector]->mask_base;
                        msi_desc[vector]->msi_attrib.entry_nr =
                                (entries + i)->entry;
                        writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
                                (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
                        writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
                                (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
                        writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
                                base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
                                PCI_MSIX_ENTRY_DATA_OFFSET);
                }
                vector = msi_desc[vector]->link.tail;
        }
        spin_unlock_irqrestore(&msi_lock, flags);

        return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of requested vectors when its software driver requests MSI-X mode
 * on the hardware device function. A return of zero indicates successful
 * configuration of the MSI-X capability structure with newly allocated
 * MSI-X vectors. A return of < 0 indicates failure. A return of > 0
 * indicates that the request exceeds the number of available vectors;
 * the driver should retry using that returned value as the new request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
        int status, pos, nr_entries, free_vectors;
        int i, j, temp;
        u16 control;
        unsigned long flags;

        if (!pci_msi_enable || !dev || !entries)
                return -EINVAL;

        if ((status = msi_init()) < 0)
                return status;

        if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
                return -EINVAL;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (control & PCI_MSIX_FLAGS_ENABLE)
                return -EINVAL;                 /* Already in MSI-X mode */

        nr_entries = multi_msix_capable(control);
        if (nvec > nr_entries)
                return -EINVAL;

        /* Check for any invalid entries */
        for (i = 0; i < nvec; i++) {
                if (entries[i].entry >= nr_entries)
                        return -EINVAL;         /* invalid entry */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
                                return -EINVAL; /* duplicate entry */
                }
        }
        temp = dev->irq;
        if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
                /* Lookup Success */
                nr_entries = nvec;
                /* Reroute MSI-X table */
                if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
                        /* #requested > #previous-assigned */
                        dev->irq = temp;
                        return nr_entries;
                }
                dev->irq = temp;
                enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
                return 0;
        }
        /* Check whether driver already requested for MSI vector */
        if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
                !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
                       "Device already has an MSI vector assigned\n",
                       pci_name(dev));
                dev->irq = temp;
                return -EINVAL;
        }

        spin_lock_irqsave(&msi_lock, flags);
        /*
         * msi_lock is held to ensure that enough vector resources are
         * available before granting the request.
         */
        free_vectors = pci_vector_resources(last_alloc_vector,
                                nr_released_vectors);
        /* Ensure that each MSI/MSI-X device has one vector reserved by
           default, to keep any single MSI-X driver from taking all
           available resources */
        free_vectors -= nr_reserved_vectors;
        /* Find the average of free vectors among MSI-X devices */
        if (nr_msix_devices > 0)
                free_vectors /= nr_msix_devices;
        spin_unlock_irqrestore(&msi_lock, flags);

        if (nvec > free_vectors) {
                if (free_vectors > 0)
                        return free_vectors;
                else
                        return -EBUSY;
        }

        status = msix_capability_init(dev, entries, nvec);
        if (!status && nr_msix_devices > 0)
                nr_msix_devices--;

        return status;
}
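
/*
 * Illustrative usage sketch only (not part of this driver); my_dev,
 * my_handler, "mydrv" and the request for 4 vectors are hypothetical.
 * A positive return value is the number of vectors currently available,
 * so the driver retries with that smaller count:
 *
 *      struct msix_entry my_entries[4];
 *      int i, err, nvec = 4;
 *
 *      for (i = 0; i < nvec; i++)
 *              my_entries[i].entry = i;
 *      err = pci_enable_msix(my_dev, my_entries, nvec);
 *      if (err > 0) {
 *              nvec = err;
 *              err = pci_enable_msix(my_dev, my_entries, nvec);
 *      }
 *      if (err)
 *              goto fall_back_to_msi_or_intx;
 *      for (i = 0; i < nvec; i++)
 *              request_irq(my_entries[i].vector, my_handler, 0,
 *                          "mydrv", my_dev);
 *
 * Teardown: free_irq() each my_entries[i].vector, then
 * pci_disable_msix(my_dev).
 */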

void pci_disable_msix(struct pci_dev* dev)
{
        int pos, temp;
        u16 control;

        if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
                return;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSIX_FLAGS_ENABLE))
                return;

        temp = dev->irq;
        if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
                int state, vector, head, tail = 0, warning = 0;
                unsigned long flags;

                vector = head = dev->irq;
                spin_lock_irqsave(&msi_lock, flags);
                while (head != tail) {
                        state = msi_desc[vector]->msi_attrib.state;
                        if (state)
                                warning = 1;
                        else {
                                vector_irq[vector] = 0; /* free it */
                                nr_released_vectors++;
                        }
                        tail = msi_desc[vector]->link.tail;
                        vector = tail;
                }
                spin_unlock_irqrestore(&msi_lock, flags);
                if (warning) {
                        dev->irq = temp;
                        printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
                               "free_irq() on all MSI-X vectors\n",
                               pci_name(dev));
                        BUG_ON(warning > 0);
                } else {
                        dev->irq = temp;
                        disable_msi_mode(dev,
                                pci_find_capability(dev, PCI_CAP_ID_MSIX),
                                PCI_CAP_ID_MSIX);

                }
        }
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is being
 * hot-removed. All MSI/MSI-X vectors previously assigned to this device
 * function are reclaimed to the unused state and may be reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
        int state, pos, temp;
        unsigned long flags;

        if (!pci_msi_enable || !dev)
                return;

        temp = dev->irq;                /* Save IOAPIC IRQ */
        if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
                !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
                spin_lock_irqsave(&msi_lock, flags);
                state = msi_desc[dev->irq]->msi_attrib.state;
                spin_unlock_irqrestore(&msi_lock, flags);
                if (state) {
                        printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
                               "called without free_irq() on MSI vector %d\n",
                               pci_name(dev), dev->irq);
                        BUG_ON(state > 0);
                } else /* Release MSI vector assigned to this device */
                        msi_free_vector(dev, dev->irq, 0);
                dev->irq = temp;                /* Restore IOAPIC IRQ */
        }
        if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
                !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
                int vector, head, tail = 0, warning = 0;
                void __iomem *base = NULL;

                vector = head = dev->irq;
                while (head != tail) {
                        spin_lock_irqsave(&msi_lock, flags);
                        state = msi_desc[vector]->msi_attrib.state;
                        tail = msi_desc[vector]->link.tail;
                        base = msi_desc[vector]->mask_base;
                        spin_unlock_irqrestore(&msi_lock, flags);
                        if (state)
                                warning = 1;
                        else if (vector != head) /* Release MSI-X vector */
                                msi_free_vector(dev, vector, 0);
                        vector = tail;
                }
                msi_free_vector(dev, vector, 0);
                if (warning) {
                        /* Force to release the MSI-X memory-mapped table */
                        u32 phys_addr, table_offset;
                        u16 control;
                        u8 bir;

                        pci_read_config_word(dev, msi_control_reg(pos),
                                &control);
                        pci_read_config_dword(dev, msix_table_offset_reg(pos),
                                &table_offset);
                        bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
                        phys_addr = pci_resource_start (dev, bir);
                        phys_addr += (u32)(table_offset &
                                ~PCI_MSIX_FLAGS_BIRMASK);
                        iounmap(base);
                        printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
                               "called without free_irq() on all MSI-X vectors\n",
                               pci_name(dev));
                        BUG_ON(warning > 0);
                }
                dev->irq = temp;                /* Restore IOAPIC IRQ */
        }
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);