/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
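
/* Hypervisor IOMMU map calls are relatively expensive, so IOTSB
 * updates are batched per-cpu: up to PGLIST_NENTS physical page
 * addresses are staged in pglist, then handed to the hypervisor in
 * a single pci_sun4v_iommu_map() call.  Every user in this file
 * follows the same pattern, with interrupts disabled throughout:
 *
 *	pci_iommu_batch_start(pdev, prot, entry);
 *	for each page:
 *		pci_iommu_batch_add(phys_page);
 *	pci_iommu_batch_end();
 */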
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}
/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	/* The hypervisor may map fewer entries than requested, so
	 * loop until the whole batch has been accepted.
	 */
	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}
/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}
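
/* The arena is a simple bitmap allocator over IOTSB entries: a set
 * bit means that IOMMU page is in use.  Allocation is a two-pass
 * scan starting at the hint (wrapping to zero once before giving
 * up), and freeing just clears the bits.
 */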
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, first_page_pa, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;

	/* Keep first_page virtual; the error paths below free it. */
	first_page_pa = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page_pa + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	/* Like the map call, demap may process fewer entries than
	 * requested, so loop until everything is torn down.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
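
/* Streaming mappings round the buffer out to whole IO pages.  As a
 * rough worked example (assuming 8K IO pages): a buffer of
 * sz = 0x2200 bytes starting at offset 0x1f00 within a page extends
 * 0x1f00 + 0x2200 = 0x4100 bytes past the page base, so the
 * IO_PAGE_ALIGN()/IO_PAGE_SHIFT arithmetic below yields npages = 3.
 */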
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)
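
/* fill_sg() walks a scatterlist whose dma_address/dma_length fields
 * have already been coalesced (by prepare_sg() in pci_4v_map_sg()
 * below); nused counts the coalesced DMA segments, nelems the
 * original entries.  It emits one IOTSB entry per IO page spanned.
 */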
static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1;
}
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
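
/* DMA on these systems is cache-coherent, so the sync-for-cpu hooks
 * below have nothing to do; they exist only to fill out the
 * pci_iommu_ops interface.
 */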
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
/* SUN4V PCI configuration space accessors. */

struct pdev_entry {
	struct pdev_entry	*next;
	u32			devhandle;
	unsigned int		bus;
	unsigned int		device;
	unsigned int		func;
};

#define PDEV_HTAB_SIZE	16
#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
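
/* pdev_htab maps (devhandle, bus, device, function) tuples to "this
 * device exists", as discovered from the OBP device tree.  Config
 * space accesses consult it (via pci_sun4v_out_of_range()) so that
 * probes of nonexistent devices are short-circuited rather than
 * issued to the hypervisor.
 */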
static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	unsigned int val;

	val = (devhandle ^ (devhandle >> 4));
	val ^= bus;
	val ^= device;
	val ^= func;

	return val & PDEV_HTAB_MASK;
}
static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
	struct pdev_entry **slot;

	if (!p)
		return -ENOMEM;

	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	p->next = *slot;
	*slot = p;

	p->devhandle = devhandle;
	p->bus = bus;
	p->device = device;
	p->func = func;

	return 0;
}
/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = toplevel_node->child;

	while (toplevel_node != NULL) {
		struct linux_prom_pci_registers *regs;
		struct property *prop;
		int ret;

		ret = obp_find(toplevel_node, bus, devfn);
		if (ret != 0)
			return ret;

		prop = of_find_property(toplevel_node, "reg", NULL);
		if (!prop)
			goto next_sibling;

		regs = prop->value;
		if (((regs->phys_hi >> 16) & 0xff) == bus &&
		    ((regs->phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = toplevel_node->sibling;
	}

	return toplevel_node != NULL;
}
static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p;

	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	while (p) {
		if (p->devhandle == devhandle &&
		    p->bus == bus &&
		    p->device == device &&
		    p->func == func)
			break;

		p = p->next;
	}

	return p;
}
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}
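
/* Reads to out-of-range devices return all ones, matching what PCI
 * hardware returns on a master abort; writes to them are silently
 * dropped.  Everything else is routed through the hypervisor config
 * space services.
 */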
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	struct property *prop;
	struct device_node *dp;

	if ((dp = p->pbm_A.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_A.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_A);
	}
	if ((dp = p->pbm_B.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_B.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
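
/* On sun4v an interrupt source is named by a (devhandle, devino)
 * pair; sun4v_build_irq() translates that pair into the IRQ number
 * the rest of the kernel uses.
 */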
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;

	return sun4v_build_irq(devhandle, devino);
}
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
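
/* pci_sun4v_resource_adjust() turns a bus-relative resource into a
 * system-absolute one.  Illustrative (made-up) numbers: if the PBM's
 * mem_space root starts at 0x7f600000000 and a BAR was assigned bus
 * address 0x100000, the adjusted resource starts at 0x7f600100000.
 */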
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		}
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
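
/* OBP may leave IOMMU translations live across the kernel handoff
 * (e.g. for the console or a boot device).  probe_existing_entries()
 * keeps those IOTSB slots reserved in the arena so we never hand
 * them out again, and tears down any translation whose target page
 * is part of our own usable memory pool.
 */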
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
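
/* Worked sizing example for pci_sun4v_iommu_init() below (arithmetic
 * sketch assuming 8K IO pages and 8-byte iopte_t entries): a
 * "virtual-dma" size of 0x80000000 (2GB) selects tsbsize = 256,
 * i.e. 256 * 8K = 2MB of IOTSB.  That is 256K entries, and at 8K
 * per IO page exactly 2GB of mappable DMA space.
 */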
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int tsbsize;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
		case 0x20000000:
			dma_mask |= 0x1fffffff;
			tsbsize = 64;
			break;

		case 0x40000000:
			dma_mask |= 0x3fffffff;
			tsbsize = 128;
			break;

		case 0x80000000:
			dma_mask |= 0x7fffffff;
			tsbsize = 256;
			break;

		default:
			prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
			prom_halt();
	}

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	struct property *prop;
	unsigned int *busrange;

	prop = of_find_property(pbm->prom_node, "bus-range", NULL);

	busrange = prop->value;

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}
static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	struct property *prop;
	int len, i;

	/* Bit 0x40 of the devhandle distinguishes the two bus
	 * modules of a controller pair.
	 */
	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = dp;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	prop = of_find_property(dp, "ranges", &len);
	pbm->pbm_ranges = prop->value;
	pbm->num_pbm_ranges =
		(len / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	prop = of_find_property(dp, "interrupt-map", &len);
	pbm->pbm_intmap = prop->value;
	pbm->num_pbm_intmap =
		(len / sizeof(struct linux_prom_pci_intmap));

	prop = of_find_property(dp, "interrupt-map-mask", NULL);
	pbm->pbm_intmask = prop->value;

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);

	pdev_htab_populate(pbm);
}
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	/* If the sibling bus module was already probed, attach this
	 * one to the same controller rather than allocating a new one.
	 */
	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, dp, devhandle);

	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}