/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;

static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists nothing
 * is mapped on the first 64K of IO space.
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

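/* DMA mapping operations installed by platform code via set_pci_dma_ops();
 * they are attached to each new PCI device in pcibios_setup_new_device()
 * below.
 */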
static struct dma_mapping_ops *pci_dma_ops;

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

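/* Quirk: some pcnet32 Ethernet adapters show up with a Trident vendor ID.
 * If the class code says Ethernet, rewrite the vendor ID to AMD (both in the
 * pci_dev and in config space) so the device is identified correctly.
 */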
static void fixup_broken_pcnet32(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

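/* Convert a device resource between the kernel's CPU-relative view and the
 * raw PCI bus view, using the host bridge's I/O base and memory offset.
 */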
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t start = res->start;
	unsigned long alignto;

	if (res->flags & IORESOURCE_IO) {
		unsigned long offset = (unsigned long)hose->io_base_virt -
					_IO_BASE;
		/* Make sure we start at our min on all hoses */
		if (start - offset < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + offset;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;

	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

		/* Align to multiple of size of minimum base. */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
	}

	res->start = start;
}

void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

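/* Read a 32-bit integer property from a device tree node, returning the
 * supplied default if the property is missing or too short.
 */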
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
	const u32 *prop;
	int len;

	prop = of_get_property(np, name, &len);
	if (prop && len >= 4)
		return *prop;
	return def;
}

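/* Decode the phys.hi cell of an Open Firmware PCI address into resource
 * flags. phys.hi is laid out as npt000ss bbbbbbbb ddddd fff rrrrrrrr:
 * ss (bits 24-25) selects the space (01 = I/O, 10 = 32-bit MEM, 11 = 64-bit
 * MEM), p (bit 30) marks a prefetchable range, and the low byte holds the
 * config register offset.
 */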
static unsigned int pci_parse_of_flags(u32 addr0)
{
	unsigned int flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}

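/* Walk the "assigned-addresses" property of a device node and fill in the
 * matching dev->resource[] entries. Each entry is five 32-bit cells
 * (20 bytes):
 *
 *   cell 0:      phys.hi (space flags, config register number in low byte)
 *   cells 1-2:   64-bit PCI base address
 *   cells 3-4:   64-bit size
 */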
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	DBG("    parse addresses (%d bytes) @ %p\n", proplen, addrs);
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		base = of_read_number(&addrs[1], 2);
		size = of_read_number(&addrs[3], 2);
		if (!size)
			continue;
		i = addrs[0] & 0xff;
		DBG("  base: %llx, size: %llx, i: %x\n",
		    (unsigned long long)base, (unsigned long long)size, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
		fixup_resource(res, dev);
	}
}

struct pci_dev *of_create_pci_dev(struct device_node *node,
				  struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;
	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	DBG("    create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);
	dev->revision = get_int_prop(node, "revision-id", 0);

	DBG("    class: 0x%x\n", dev->class);
	DBG("    revision: 0x%x\n", dev->revision);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
	}

	pci_parse_of_addrs(node, dev);

	DBG("    adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

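/* Build pci_dev structures for the children of a bus from the device tree
 * rather than by probing config space: every child node with a "reg"
 * property becomes a pci_dev, and PCI-PCI or CardBus bridges are scanned
 * recursively.
 */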
void __devinit of_scan_bus(struct device_node *node,
			   struct pci_bus *bus)
{
	struct device_node *child = NULL;
	const u32 *reg;
	int reglen, devfn;
	struct pci_dev *dev;

	DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);

	while ((child = of_get_next_child(node, child)) != NULL) {
		DBG("  * %s\n", child->full_name);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;
		devfn = (reg[0] >> 8) & 0xff;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(child, bus, devfn);
		if (!dev)
			continue;
		DBG("dev header type: %x\n", dev->hdr_type);

		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			of_scan_pci_bridge(child, dev);
	}

	do_bus_setup(bus);
}
EXPORT_SYMBOL(of_scan_bus);

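/* Set up the bus behind a PCI-PCI (or CardBus) bridge from its device tree
 * node: create the child bus, derive its windows from the "ranges" property,
 * then scan it either from the device tree or by normal probing, depending
 * on the platform's probe mode.
 */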
void __devinit of_scan_pci_bridge(struct device_node *node,
				  struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, mode;
	struct resource *res;
	unsigned int flags;
	u64 size;

	DBG("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	if (ranges == NULL) {
		printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;
	bus->sysdata = node;

	/* parse ranges property */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0]);
		size = of_read_number(&ranges[6], 2);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = of_read_number(&ranges[1], 2);
		res->end = res->start + size - 1;
		res->flags = flags;
		fixup_resource(res, dev);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	DBG("    bus name: %s\n", bus->name);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG("    probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

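/* Scan one PCI host bridge (PHB): create its root bus, map its I/O space,
 * request its I/O and memory windows, then discover the devices below it
 * using the probe mode chosen by the platform.
 */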
void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->arch_data;
	int i, mode;
	struct resource *res;

	DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");

	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		pcibios_map_io_space(bus);

	bus->resource[0] = res = &hose->io_resource;
	if (res->flags && request_resource(&ioport_resource, res)) {
		printk(KERN_ERR "Failed to request PCI IO region "
		       "on PCI domain %04x\n", hose->global_number);
		DBG("res->start = 0x%016lx, res->end = 0x%016lx\n",
		    res->start, res->end);
	}

	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		bus->resource[i+1] = res;
		if (res->flags && request_resource(&iomem_resource, res))
			printk(KERN_ERR "Failed to request PCI memory region "
			       "on PCI domain %04x\n", hose->global_number);
	}

	mode = PCI_PROBE_NORMAL;

	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG("    probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE) {
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}

	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_pcibios_init();

	printk(KERN_DEBUG "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		if (pci_probe_only)
			pcibios_claim_of_setup();
		else
			/* FIXME: `else' will be removed when
			   pci_assign_unassigned_resources() is able to work
			   correctly with [partially] allocated PCI tree. */
			pci_assign_unassigned_resources();
	}

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

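/* Enable I/O and/or memory decoding for the resources selected by @mask,
 * writing PCI_COMMAND only if the value actually changed.
 */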
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1 << i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;
	else {
		struct pci_controller *hose = pci_bus_to_host(bus);
		return hose->buid != 0;
	}
}

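/* Parse a host bridge's "ranges" property into the controller's I/O and
 * memory resources, recording the PCI <-> CPU address offsets used by the
 * resource fixups in this file.
 */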
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev, int prim)
{
	const unsigned int *ranges;
	unsigned int pci_space;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = of_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;
	hose->io_base_phys = 0;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_space = ranges[0];
		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
		ranges += np;
		if (size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		while (rlen >= np * sizeof(unsigned int)) {
			unsigned long addr, phys;

			if (ranges[0] != pci_space)
				break;
			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
			phys = ranges[3];
			if (na >= 2)
				phys = (phys << 32) | ranges[4];
			if (addr != pci_addr + size ||
			    phys != cpu_phys_addr + size)
				break;

			size += ((unsigned long)ranges[na+3] << 32)
				| ranges[na+4];
			ranges += np;
			rlen -= np * sizeof(unsigned int);
		}

		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr - pci_addr;
			/* handle from 0 to top of I/O window */
			hose->pci_io_size = pci_addr + size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
			    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n",
				    hose->global_number,
				    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

#ifdef CONFIG_HOTPLUG

int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other busses overlapping those pages.
	 */
	if (bus->self) {
		struct resource *res = bus->resource[0];

		DBG("IO unmapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));

		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	/* Check if we have IOs allocated */
	if (hose->io_base_alloc == 0)
		return 0;

	DBG("IO unmapping for PHB %s\n",
	    ((struct device_node *)hose->arch_data)->full_name);
	DBG("  alloc=0x%p\n", hose->io_base_alloc);

	/* This is a PHB, we fully unmap the IO area */
	vunmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */

int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
	struct vm_struct *area;
	unsigned long phys_page;
	unsigned long size_page;
	unsigned long io_virt_offset;
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, nothing to do, page tables still exist and
	 * thus HPTEs will be faulted in when needed.
	 */
	if (bus->self) {
		DBG("IO mapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));
		DBG("  virt=0x%016lx...0x%016lx\n",
		    bus->resource[0]->start + _IO_BASE,
		    bus->resource[0]->end + _IO_BASE);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);
	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

	/* Make sure IO area address is clear */
	hose->io_base_alloc = NULL;

	/* If there's no IO to map on that bus, get away too */
	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
		return 0;

	/* Let's allocate some IO space for that guy. We don't pass
	 * VM_IOREMAP because we don't care about alignment tricks that
	 * the core does in that case. Maybe we should due to stupid cards
	 * with incomplete address decoding but I'd rather not deal with
	 * those outside of the reserved 64K legacy region.
	 */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;
	hose->io_base_alloc = area->addr;
	hose->io_base_virt = (void __iomem *)(area->addr +
					      hose->io_base_phys - phys_page);

	DBG("IO mapping for PHB %s\n",
	    ((struct device_node *)hose->arch_data)->full_name);
	DBG("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	DBG("  size=0x%016lx (alloc=0x%016lx)\n",
	    hose->pci_io_size, size_page);

	/* Establish the mapping */
	if (__ioremap_at(phys_page, area->addr, size_page,
			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
		return -ENOMEM;

	/* Fixup hose IO resource */
	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	hose->io_resource.start += io_virt_offset;
	hose->io_resource.end += io_virt_offset;

	DBG("  hose->io_resource=0x%016lx...0x%016lx\n",
	    hose->io_resource.start, hose->io_resource.end);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

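/* Translate a resource parsed from the device tree (in PCI bus terms) into
 * CPU terms, using the host bridge's virtual I/O base or memory offset.
 */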
static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long offset;

	if (res->flags & IORESOURCE_IO) {
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		res->start += offset;
		res->end += offset;
	} else if (res->flags & IORESOURCE_MEM) {
		res->start += hose->pci_mem_offset;
		res->end += hose->pci_mem_offset;
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
					      struct pci_bus *bus)
{
	/* Update device resources. */
	int i;

	DBG("%s: Fixup resources:\n", pci_name(dev));
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];
		if (!res->flags)
			continue;

		DBG("  0x%02x < %08lx:0x%016lx...0x%016lx\n",
		    i, res->flags, res->start, res->end);

		fixup_resource(res, dev);

		DBG("       > %08lx:0x%016lx...0x%016lx\n",
		    res->flags, res->start, res->end);
	}
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);

void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
	struct dev_archdata *sd = &dev->dev.archdata;

	sd->of_node = pci_device_to_OF_node(dev);

	DBG("PCI device %s OF node: %s\n", pci_name(dev),
	    sd->of_node ? sd->of_node->full_name : "<none>");

	sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
	sd->numa_node = pcibus_to_node(dev->bus);
#else
	sd->numa_node = -1;
#endif
	if (ppc_md.pci_dma_dev_setup)
		ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

static void __devinit do_bus_setup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (ppc_md.pci_dma_bus_setup)
		ppc_md.pci_dma_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		pcibios_setup_new_device(dev);

	/* Read default IRQs and fixup if necessary */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_read_irq_line(dev);
		if (ppc_md.pci_irq_fixup)
			ppc_md.pci_irq_fixup(dev);
	}
}

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct device_node *np;

	np = pci_bus_to_OF_node(bus);

	DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");

	if (dev && pci_probe_only &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		/* This is a subordinate bridge */

		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	do_bus_setup(bus);

	if (!pci_probe_only)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

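/* Map a CPU physical address inside some host bridge's I/O window to the
 * corresponding port I/O token (an offset from _IO_BASE), or return -1 if
 * no bridge claims that address.
 */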
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + hose->pci_io_size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

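/* sys_pciconfig_iobase: powerpc-specific syscall historically used by user
 * space (e.g. X servers) to look up a bus's I/O and memory bases; the
 * IOBASE_* selectors below choose which value of the matching host bridge
 * is returned.
 */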
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller *hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup. We return the first match
	 */
	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus <= bus->subordinate)
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif