/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

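/* IOMMU mappings are staged in a per-cpu batch to amortize hypervisor
 * call overhead: physical page addresses accumulate in pglist and are
 * pushed to the hypervisor in one pci_sun4v_iommu_map() call when the
 * list fills up or the mapping operation completes.  With the 8K base
 * pages used on sparc64, PGLIST_NENTS works out to 1024 entries.
 */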
struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

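        /* The map hypercall may process only a prefix of the request;
         * it returns the number of pages actually mapped.  Loop until
         * the whole page list has been consumed or an error comes back.
         */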
        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

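/* Flush the pending batch if the caller's next mapping would not be
 * contiguous with it.  An entry of ~0UL marks a batch that has no
 * valid starting entry yet (see dma_4v_map_sg).
 */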
static inline void iommu_batch_new_entry(unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
                iommu_batch_flush(p);
        p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

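/* Allocate coherent DMA memory: get cleared pages on the device's NUMA
 * node, carve a matching range out of the IOMMU arena, then batch-map
 * every page with both READ and WRITE attributes so the device can DMA
 * in either direction.
 */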
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        unsigned long flags, order, first_page, first_paddr, npages, n;
        struct iommu *iommu;
        struct page *page;
        void *ret;
        long entry;
        int nid;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;

        /* Keep the virtual address in first_page; both failure paths
         * below hand it to free_pages().  Only the physical address is
         * fed to the mapping batch.
         */
        first_paddr = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_paddr + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

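        /* Like the map call, the demap hypercall reports how many
         * entries it actually processed; iterate until all are gone.
         */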
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

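/* Streaming mappings follow the same batching pattern as the coherent
 * path, but the page protections depend on the transfer direction: the
 * WRITE attribute (device writes to memory) is only granted when the
 * direction is not DMA_TO_DEVICE.
 */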
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
                                    enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
                                size_t sz, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, bus_addr, npages);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

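/* Map a scatterlist: allocate an IOMMU range per segment, then try to
 * merge a segment into the previous one when its DMA address came out
 * contiguous, subject to the device's max segment size and segment
 * boundary constraints.  outcount tracks how many merged segments are
 * actually handed back to the caller.
 */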
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
        unsigned long base_shift;
        long err;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        if (nelems == 0 || !iommu)
                return 0;

        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_batch_start(dev, prot, ~0UL);

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                iommu_batch_new_entry(entry);

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        err = iommu_batch_add(paddr);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - the allocated dma_addr isn't contiguous with the previous allocation
                         * - adding this segment would exceed the device's max segment size
                         * - the merged segment would cross a DMA segment boundary
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        err = iommu_batch_end();

        if (unlikely(err < 0L))
                goto iommu_map_failed;

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length);
                        iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
        unsigned long flags;
        u32 devhandle;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                while (npages) {
                        unsigned long num;

                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                    npages);
                        entry += num;
                        npages -= num;
                }

                sg = sg_next(sg);
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_single                     = dma_4v_map_single,
        .unmap_single                   = dma_4v_unmap_single,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
        .sync_single_for_cpu            = dma_4v_sync_single_for_cpu,
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
};

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->prom_node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm);

        /* XXX register error interrupt handlers XXX */
}

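/* OBP may leave IOMMU translations live for devices it was driving at
 * boot.  Walk the whole TSB: translations backed by memory the kernel
 * considers available are torn down, anything else is preserved and
 * marked busy in the arena map so it is never handed out again.
 */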
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
                                                   struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct iommu *iommu = pbm->iommu;
        struct property *prop;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 vdma[2], dma_mask, dma_offset;

        prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
        if (prop) {
                u32 *val = prop->value;

                vdma[0] = val[0];
                vdma[1] = val[1];
        } else {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
                            vdma[0], vdma[1]);
                prom_halt();
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);
}

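/* MSI support is built around hypervisor-managed MSI event queues
 * (MSIQs).  Each PBM owns a contiguous range of them (msiq_first ..
 * msiq_first + msiq_num - 1), each holding msiq_ent_count entries of
 * the layout below.  Queue head offsets are byte offsets into the
 * queue, read and written via the gethead/sethead hypercalls.
 */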
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code are encoded as:
         *      bits 39:32 are the bus/device/fn of the msg target-id
         *      bits 18:16 are the message routing code
         *      bits 7:0 are the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
                                      struct device_node *dp, u32 devhandle)
{
        struct pci_pbm_info *pbm;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        pbm->numa_node = of_node_to_nid(dp);

        pbm->scan_bus = pci_sun4v_scan_bus;
        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->parent = p;
        pbm->prom_node = dp;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);
        printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);
        pci_sun4v_iommu_init(pbm);
        pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
        static int hvapi_negotiated = 0;
        struct pci_controller_info *p;
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        struct property *prop;
        struct linux_prom64_registers *regs;
        u32 devhandle;
        int i;

        if (!hvapi_negotiated++) {
                int err = sun4v_hvapi_register(HV_GRP_PCI,
                                               vpci_major,
                                               &vpci_minor);

                if (err) {
                        prom_printf("SUN4V_PCI: Could not register hvapi, "
                                    "err=%d\n", err);
                        prom_halt();
                }
                printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        prop = of_find_property(dp, "reg", NULL);
        if (!prop) {
                prom_printf("SUN4V_PCI: Could not find config registers\n");
                prom_halt();
        }
        regs = prop->value;

        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

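        /* The two PBMs of a controller get devhandles that differ only
         * in bit 0x40 (see pci_sun4v_pbm_init).  If this node's sibling
         * has already been probed, hang the new PBM off the existing
         * controller instead of allocating a fresh one.
         */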
        for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
                        return;
                }
        }

        for_each_possible_cpu(i) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(iommu_batch, i).pglist = (u64 *) page;
        }

        p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_A.iommu = iommu;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_B.iommu = iommu;

        pci_sun4v_pbm_init(p, dp, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}