/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

/* Version of the sun4v hypervisor PCI API we request at registration
 * time; the negotiated minor is written back by sun4v_hvapi_register().
 */
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

/* Number of physical-address entries that fit in one page of batch
 * storage.
 */
#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

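/* The batch is per-CPU scratch state: a mapping sequence builds up a
 * page list here and hands it to the hypervisor in as few
 * pci_sun4v_iommu_map() calls as possible.  All of the helpers below
 * require interrupts to be disabled so the current CPU's batch cannot
 * be re-entered from interrupt context mid-sequence.
 */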
/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk(KERN_ERR "iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

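/* Sketch of how the batching helpers above are used by the mapping
 * paths below ('prot', 'entry' and 'paddr' are illustrative, not real
 * identifiers from this file):
 *
 *      local_irq_save(flags);
 *      iommu_batch_start(dev, prot, entry);
 *      for (n = 0; n < npages; n++)
 *              iommu_batch_add(paddr + n * IO_PAGE_SIZE);
 *      iommu_batch_end();
 *      local_irq_restore(flags);
 *
 * iommu_batch_add() flushes automatically when the page list fills;
 * iommu_batch_end() pushes out whatever remains.
 */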
/* Simple bitmap allocator over the IOTSB: scan forward from the
 * rotating hint, wrapping around for one more pass before giving up.
 */
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
                       unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct iommu *iommu;
        unsigned long flags, order, first_page, first_page_pa, npages, n;
        void *ret;
        long entry;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        first_page = __get_free_pages(gfp, order);
        if (unlikely(first_page == 0UL))
                return NULL;

        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto arena_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;

        /* Keep the virtual address in 'first_page' so both error paths
         * below can free_pages() it; the batch loop wants the physical
         * address.
         */
        first_page_pa = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_page_pa + (n * IO_PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        arena_free(&iommu->arena, entry, npages);

        /* The hypervisor may demap fewer entries than requested, so
         * loop until the whole range has been torn down.
         */
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
                                    enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
                                size_t sz, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))

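/* Program one IOMMU mapping per IO page covered by the scatterlist.
 * 'nused' is the number of coalesced DMA segments prepare_sg() produced,
 * 'nelems' the raw scatterlist entry count.  The walk below advances
 * through the backing pages of each DMA segment, starting a fresh
 * mapping run whenever it detects a page crossing, and emits the
 * entries through the iommu_batch helpers.
 */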
static long fill_sg(long entry, struct device *dev,
                    struct scatterlist *sg,
                    int nused, int nelems, unsigned long prot)
{
        struct scatterlist *dma_sg = sg;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg = sg_next(sg);
                                nelems--;
                        }

                        pteval = (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                long err;

                                err = iommu_batch_add(pteval);
                                if (unlikely(err < 0L))
                                        goto iommu_map_failed;

                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg = sg_next(sg);
                        nelems--;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (nelems &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg = sg_next(sg);
                                nelems--;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg = sg_next(dma_sg);
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_failed;

        local_irq_restore(flags);
        return 0;

iommu_map_failed:
        local_irq_restore(flags);
        return -1L;
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, prot;
        u32 dma_base;
        struct scatterlist *sgtmp;
        long entry, err;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        dma_4v_map_single(dev, sg_virt(sglist),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        /* Step 1: Prepare scatter list. */
        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster and context, if necessary. */
        spin_lock_irqsave(&iommu->lock, flags);
        entry = arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        dma_base = iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp = sg_next(sgtmp);
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        err = fill_sg(entry, dev, sglist, used, nelems, prot);
        if (unlikely(err < 0L))
                goto iommu_map_failed;

        return used;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;

iommu_map_failed:
        spin_lock_irqsave(&iommu->lock, flags);
        arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, i, npages;
        struct scatterlist *sg, *sgprv;
        long entry;
        u32 devhandle, bus_addr;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;

                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        /* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_single                     = dma_4v_map_single,
        .unmap_single                   = dma_4v_unmap_single,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
        .sync_single_for_cpu            = dma_4v_sync_single_for_cpu,
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
};

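/* sun4v_pci_init() below installs this vector as the global 'dma_ops',
 * so on sun4v machines a generic DMA API call such as
 *
 *      dma_addr_t dvma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * dispatches to dma_4v_map_single().  The sync hooks are empty because
 * DMA is cache-coherent on these systems; only the IOMMU translations
 * need management, not CPU caches.
 */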
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->prom_node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm);

        /* XXX register error interrupt handlers XXX */
}

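/* OBP may leave IOMMU translations behind (e.g. for the firmware
 * console).  Walk the whole TSB: entries that target ordinary
 * available memory are demapped, while the rest are preserved and
 * marked busy in the arena so they are never handed out.
 */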
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
                                            struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

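/* The "virtual-dma" OBP property is a pair of 32-bit cells giving the
 * base and size of the DVMA window; both must be IO-page aligned.  In
 * its absence a 2GB window at 0x80000000 is assumed.
 */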
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct iommu *iommu = pbm->iommu;
        struct property *prop;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 vdma[2], dma_mask, dma_offset;

        prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
        if (prop) {
                u32 *val = prop->value;

                vdma[0] = val[0];
                vdma[1] = val[1];
        } else {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
                            vdma[0], vdma[1]);
                prom_halt();
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code where:
         *      bits 39:32 is the bus/device/fn of the msg target-id
         *      bits 18:16 is the message routing code
         *      bits 7:0 is the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};
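
/* Illustrative decode of the req_id field using the masks above (shown
 * for reference; the driver itself does not need to take it apart):
 *
 *      bus  = (ep->req_id & MSIQ_REQID_BUS_MASK)    >> MSIQ_REQID_BUS_SHIFT;
 *      dev  = (ep->req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
 *      func = (ep->req_id & MSIQ_REQID_FUNC_MASK)   >> MSIQ_REQID_FUNC_SHIFT;
 */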

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset.  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

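/* The three hooks above are driven by the generic sparc64 MSI layer,
 * roughly like this (a sketch, not verbatim from pci_msi.c):
 *
 *      get_head(pbm, msiqid, &head);
 *      while (dequeue_msi(pbm, msiqid, &head, &msi) > 0)
 *              ... dispatch the interrupt bound to 'msi' ...
 *      set_head(pbm, msiqid, head);
 *
 * pci_sun4v_dequeue_msi() returns 0 once the entry at 'head' is empty,
 * 1 after consuming an MSI entry, or a negative errno on error.
 */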
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

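/* Bit 0x40 of the device handle distinguishes the two PCI bus modules
 * that hang off one controller: set selects PBM B, clear selects PBM A.
 * sun4v_pci_init() relies on the same bit (devhandle ^ 0x40) to find an
 * already-probed sibling.
 */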
static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
        struct pci_pbm_info *pbm;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        pbm->scan_bus = pci_sun4v_scan_bus;
        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->parent = p;
        pbm->prom_node = dp;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);
        pci_sun4v_iommu_init(pbm);
        pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
        static int hvapi_negotiated = 0;
        struct pci_controller_info *p;
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        struct property *prop;
        struct linux_prom64_registers *regs;
        u32 devhandle;
        int i;

        if (!hvapi_negotiated++) {
                int err = sun4v_hvapi_register(HV_GRP_PCI,
                                               vpci_major,
                                               &vpci_minor);

                if (err) {
                        prom_printf("SUN4V_PCI: Could not register hvapi, "
                                    "err=%d\n", err);
                        prom_halt();
                }
                printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        prop = of_find_property(dp, "reg", NULL);
        regs = prop->value;

        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        /* If the sibling PBM (devhandle differing only in bit 0x40) was
         * already probed, attach this node to the existing controller
         * instead of creating a new one.
         */
        for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
                        return;
                }
        }

        /* Allocate the per-CPU IOMMU batch page lists. */
        for_each_possible_cpu(i) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(iommu_batch, i).pglist = (u64 *) page;
        }

        p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_A.iommu = iommu;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_B.iommu = iommu;

        pci_sun4v_pbm_init(p, dp, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}