/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME     "pci_sun4v"
#define PFX             DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

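/* To amortize the cost of the hypervisor call, IOTSB updates are
 * batched per-cpu: physical page addresses accumulate in pglist and
 * are handed to pci_sun4v_iommu_map() in one go when the list fills
 * or the batch is ended.
 */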
struct iommu_batch {
        struct device   *dev;           /* Device the mapping is for.   */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

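/* Push the accumulated pages into the IOTSB.  The hypervisor may map
 * fewer pages than requested, so loop until the whole list has been
 * accepted or an error is returned.
 */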
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk(KERN_ERR "iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

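/* Switch the batch to a new IOTSB index, flushing any pending pages
 * first unless the new entry simply continues the current run.
 */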
static inline void iommu_batch_new_entry(unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
                iommu_batch_flush(p);
        p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

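/* Allocate page-aligned coherent memory on the device's NUMA node and
 * map it into the IOMMU with both read and write attributes.
 */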
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        unsigned long flags, order, first_page, npages, n;
        struct iommu *iommu;
        struct page *page;
        void *ret;
        long entry;
        int nid;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        /* first_page was converted to a physical address above;
         * free_pages() wants the virtual address back.
         */
        first_page = (unsigned long) ret;

range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

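/* Undo dma_4v_alloc_coherent(): release the IOMMU range, demap the
 * pages through the hypervisor, and free the backing pages.
 */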
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

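/* Map a single CPU buffer for streaming DMA, returning a bus address
 * inside the IOMMU window.  Write permission is only granted when the
 * transfer can move data toward memory.
 */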
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
                                    enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

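/* Tear down a streaming mapping created by dma_4v_map_single().  */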
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
                                size_t sz, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, bus_addr, npages);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

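/* Map a scatterlist, coalescing entries whose bus addresses come out
 * contiguous, as long as the merged segment respects the device's
 * max segment size and segment boundary constraints.
 */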
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
        unsigned long base_shift;
        long err;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        if (nelems == 0 || !iommu)
                return 0;

        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_batch_start(dev, prot, ~0UL);

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                iommu_batch_new_entry(entry);

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        err = iommu_batch_add(paddr);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - the allocated dma_addr isn't contiguous with the
                         *   previous allocation
                         * - the merged segment would exceed the device's
                         *   maximum segment size or cross a segment boundary
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        err = iommu_batch_end();

        if (unlikely(err < 0L))
                goto iommu_map_failed;

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
                        iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

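/* Walk the mapped scatterlist and release each segment's IOMMU range
 * and hypervisor mappings.
 */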
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
        unsigned long flags;
        u32 devhandle;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                while (npages) {
                        unsigned long num;

                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                    npages);
                        entry += num;
                        npages -= num;
                }

                sg = sg_next(sg);
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}

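/* DMA on sun4v is cache-coherent, so the sync operations have no work
 * to do.
 */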
static void dma_4v_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_single                     = dma_4v_map_single,
        .unmap_single                   = dma_4v_unmap_single,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
        .sync_single_for_cpu            = dma_4v_sync_single_for_cpu,
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
};
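
/* These ops back the generic DMA API once pci_sun4v_probe() points
 * dma_ops at sun4v_dma_ops.  A driver-side sketch of the call path
 * (buffer names hypothetical, for illustration only):
 *
 *      dma_addr_t ba = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *              --> dma_4v_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE)
 *      dma_unmap_single(&pdev->dev, ba, len, DMA_TO_DEVICE);
 *              --> dma_4v_unmap_single(&pdev->dev, ba, len, DMA_TO_DEVICE)
 */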

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
                                      struct device *parent)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->op->node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

        /* XXX register error interrupt handlers XXX */
}

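/* The firmware (OBP) may have left IOMMU translations installed.
 * Preserve any entry whose target page is not in the available
 * physical memory list; demap the rest.
 */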
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
                                                   struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

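/* Set up the software IOMMU state from the "virtual-dma" OBP property
 * (DMA offset and window size), falling back to a 2GB window at
 * 0x80000000 when the property is absent.
 */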
static int __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
        struct iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 dma_mask, dma_offset;
        const u32 *vdma;

        vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
        if (!vdma)
                vdma = vdma_default;

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
                       vdma[0], vdma[1]);
                return -EINVAL;
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                printk(KERN_ERR PFX "Error, kzalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk(KERN_INFO "%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);

        return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI, bits 15:0 are the data from the MSI packet.
         * For MSI-X, bits 31:0 are the data from the MSI packet.
         * For MSG, this holds the message code and message routing code:
         *      bits 39:32 are the bus/device/fn of the msg target-id
         *      bits 18:16 are the message routing code
         *      bits 7:0 are the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

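/* MSI event queue helpers: the hypervisor exposes each queue's head as
 * a byte offset into the queue, which the handlers below read, walk,
 * and write back.
 */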
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

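/* Allocate one contiguous block holding every MSI event queue, then
 * register each queue with the hypervisor and read the configuration
 * back to verify it took effect.
 */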
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

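/* Fill in one PCI bus module (PBM): probe properties, set up the
 * IOMMU and MSI support, then scan the bus and link the PBM into the
 * global list.
 */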
static int __init pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
                                     struct of_device *op, u32 devhandle)
{
        struct device_node *dp = op->node;
        int err;

        pbm->numa_node = of_node_to_nid(dp);

        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->op = op;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk(KERN_INFO "%s: SUN4V PCI Bus Module\n", pbm->name);
        printk(KERN_INFO "%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);

        err = pci_sun4v_iommu_init(pbm);
        if (err)
                return err;

        pci_sun4v_msi_init(pbm);

        pci_sun4v_scan_bus(pbm, &op->dev);

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        return 0;
}

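/* Top-level probe: negotiate the PCI hypervisor API group once, set
 * up the per-cpu IOMMU batch page lists, then allocate and initialize
 * the PBM for this node.
 */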
static int __devinit pci_sun4v_probe(struct of_device *op,
                                     const struct of_device_id *match)
{
        const struct linux_prom64_registers *regs;
        static int hvapi_negotiated = 0;
        struct pci_pbm_info *pbm;
        struct device_node *dp;
        struct iommu *iommu;
        u32 devhandle;
        int i, err;

        dp = op->node;

        if (!hvapi_negotiated++) {
                err = sun4v_hvapi_register(HV_GRP_PCI,
                                           vpci_major,
                                           &vpci_minor);

                if (err) {
                        printk(KERN_ERR PFX "Could not register hvapi, "
                               "err=%d\n", err);
                        return err;
                }
                printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        regs = of_get_property(dp, "reg", NULL);
        err = -ENODEV;
        if (!regs) {
                printk(KERN_ERR PFX "Could not find config registers\n");
                goto out_err;
        }
        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        err = -ENOMEM;
        if (!iommu_batch_initialized) {
                for_each_possible_cpu(i) {
                        unsigned long page = get_zeroed_page(GFP_KERNEL);

                        if (!page)
                                goto out_err;

                        per_cpu(iommu_batch, i).pglist = (u64 *) page;
                }
                iommu_batch_initialized = 1;
        }

        pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
        if (!pbm) {
                printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
                goto out_err;
        }

        iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
        if (!iommu) {
                printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
                goto out_free_controller;
        }

        pbm->iommu = iommu;

        err = pci_sun4v_pbm_init(pbm, op, devhandle);
        if (err)
                goto out_free_iommu;

        dev_set_drvdata(&op->dev, pbm);

        return 0;

out_free_iommu:
        kfree(pbm->iommu);

out_free_controller:
        kfree(pbm);

out_err:
        return err;
}

static struct of_device_id __initdata pci_sun4v_match[] = {
        {
                .name = "pci",
                .compatible = "SUNW,sun4v-pci",
        },
        {},
};

static struct of_platform_driver pci_sun4v_driver = {
        .name           = DRIVER_NAME,
        .match_table    = pci_sun4v_match,
        .probe          = pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
        return of_register_driver(&pci_sun4v_driver, &of_bus_type);
}

subsys_initcall(pci_sun4v_init);