/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct sun4v_pglist {
	u64	*pglist;
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
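/* Sizing note (arithmetic only, assuming sparc64's usual 8KB kernel
 * page and 8KB IO page): PGLIST_NENTS = 8192 / sizeof(u64) = 1024
 * entries per CPU, so every mapping request below is bounded at
 * 1024 IO pages, i.e. 8MB per mapping.
 */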
/* Bitmap allocator for IOMMU TSB entries.  Caller must hold iommu->lock. */
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
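/* Illustrative only, not part of the original file: every caller
 * brackets the arena with iommu->lock and treats a negative return as
 * failure.  A minimal sketch (variable names assumed to be in scope):
 *
 *	spin_lock_irqsave(&iommu->lock, flags);
 *	entry = pci_arena_alloc(&iommu->arena, npages);
 *	spin_unlock_irqrestore(&iommu->lock, flags);
 *	if (unlikely(entry < 0L))
 *		goto fail;
 *	...
 *	pci_arena_free(&iommu->arena, entry, npages);
 */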
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;
	u64 *pglist;
	u32 devhandle;
	int cpu;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return NULL;

	npages = size >> IO_PAGE_SHIFT;
	if (npages > PGLIST_NENTS)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	devhandle = pcp->pbm->devhandle;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (n = 0; n < npages; n++)
		pglist[n] = first_page + (n * PAGE_SIZE);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages,
					  (HV_PCI_MAP_ATTR_READ |
					   HV_PCI_MAP_ATTR_WRITE),
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;
}
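/* Note on the loop above: the pci_sun4v_iommu_map() hypervisor call may
 * map fewer entries than requested and returns the count actually
 * mapped, so the caller advances entry/pglist and retries.  A purely
 * hypothetical trace: a 300-page request could complete as two calls
 * of 256 + 44 if the hypervisor batches internally.
 */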
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 devhandle, bus_addr, ret;
	unsigned long prot;
	long entry;
	u64 *pglist;
	int cpu;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
		pglist[i] = base_paddr;

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot,
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}
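/* Worked example of the page arithmetic above (illustrative numbers,
 * 8KB IO pages): a 0x2200-byte buffer at page offset 0x1f00 gives
 *
 *	npages = (IO_PAGE_ALIGN(0x1f00 + 0x2200) - 0x0) >> IO_PAGE_SHIFT
 *	       = 0x6000 >> 13 = 3
 *
 * and the returned handle keeps the byte offset: ret = bus_addr | 0x1f00.
 */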
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(long entry, u32 devhandle,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i, cpu, pglist_ent;
	u64 *pglist;

	cpu = get_cpu();
	pglist = __get_cpu_var(iommu_pglists).pglist;
	pglist_ent = 0;
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				pglist[pglist_ent++] = pteval;
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	BUG_ON(pglist_ent == 0);

	/* This is the map path, so create the mappings; the demap call
	 * that used to sit here was a bug, prot and the page list were
	 * never handed to the hypervisor.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  pglist_ent, prot,
					  __pa(pglist));
		entry += num;
		pglist_ent -= num;
		pglist += num;
	} while (pglist_ent != 0);

	put_cpu();
}
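/* Illustrative trace (addresses made up): two scatterlist entries that
 * are physically contiguous, say 0x10000000/len 0x2000 followed by
 * 0x10002000/len 0x2000, were already merged by prepare_sg() into one
 * dma_sg of dma_length 0x4000; fill_sg() then emits two pglist entries
 * (0x10000000, 0x10002000) and maps them with a single TSB range.
 */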
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 devhandle, dma_base;
	struct scatterlist *sgtmp;
	long entry;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	fill_sg(entry, devhandle, sglist, used, nelems, prot);

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
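/* Driver-side view (illustrative, names assumed): pci_map_sg() returns
 * the number of coalesced DMA segments, which may be smaller than
 * nelems, so callers must iterate over the return value:
 *
 *	int n = pci_map_sg(pdev, sglist, nelems, PCI_DMA_TODEVICE);
 *	for (i = 0; i < n; i++)
 *		program_device(sglist[i].dma_address, sglist[i].dma_length);
 *	pci_unmap_sg(pdev, sglist, nelems, PCI_DMA_TODEVICE);
 *
 * program_device() is a hypothetical stand-in for the device-specific
 * descriptor setup.
 */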
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
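/* Illustrative only: drivers never call these hooks directly.  The
 * generic sparc64 pci_* DMA wrappers dispatch through this ops table,
 * so driver code such as (buffer names assumed):
 *
 *	buf = pci_alloc_consistent(pdev, len, &dvma);
 *	h = pci_map_single(pdev, ptr, sz, PCI_DMA_TODEVICE);
 *	pci_unmap_single(pdev, h, sz, PCI_DMA_TODEVICE);
 *	pci_free_consistent(pdev, len, buf, dvma);
 *
 * lands in pci_4v_alloc_consistent() and friends on SUN4V.
 */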
/* SUN4V PCI configuration space accessors. */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus == pbm->pci_first_busno) {
		if (device == 0 && func == 0)
			return 0;
		return 1;
	}

	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}
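/* Example (hypothetical device): a size-2 read of PCI_VENDOR_ID
 * (where = 0x00) returns the hypervisor's value in "ret", and the
 * masking above narrows it, e.g. *value = ret & 0xffff = 0x108e for a
 * Sun device.
 */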
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 5;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 5;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 5;
			break;
		};
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}
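/* Example of the PIL selection above: a network interface
 * (PCI_BASE_CLASS_NETWORK, class byte 0x02) gets PIL 6, while a
 * storage controller stays at the default PIL 5 before the IRQ is
 * built via sun4v_build_irq().
 */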
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
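/* Example decode (hypothetical ranges entry): child_phys_hi =
 * 0x02000000 has type = (0x02000000 >> 24) & 0x3 = 2, i.e. 32-bit MEM;
 * with parent_phys_hi = 0x000007f7 and parent_phys_lo = 0x0, the PBM
 * MEM space starts at physical 0x7f700000000 and spans 2GB.
 */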
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
/* OBP may have left IOMMU mappings live (e.g. for the console device);
 * mark those TSB entries as in-use so we never hand them out.
 */
static void probe_existing_entries(struct pci_pbm_info *pbm,
				   struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK)
			__set_bit(i, arena->map);
	}
}
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;

		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 256;
			break;

		default:
			prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
			prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	probe_existing_entries(pbm, iommu);
}
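/* Worked sizing example, following the constants above: the default
 * virtual-dma window {0x80000000, 0x80000000} (2GB base, 2GB span)
 * yields
 *
 *	dma_mask        = 0x80000000 | 0x7fffffff = 0xffffffff
 *	tsbsize         = 256 * 8K                = 2MB
 *	num_tsb_entries = 2MB / sizeof(iopte_t)   = 262144
 *	bitmap size     = 262144 / 8              = 32KB
 *
 * i.e. one TSB entry and one arena bit per 8KB IO page of the window.
 */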
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_pglists, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;
	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}
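/* Example of the devhandle extraction in sun4v_pci_init() (value
 * hypothetical): a "reg" property with phys_addr = 0x0000040000000000
 * yields devhandle = (phys_addr >> 32) & 0x0fffffff = 0x400; its
 * sibling PBM differs only in bit 6, hence the (devhandle ^ 0x40)
 * pairing test above.
 */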