2 * Common pmac/prep/chrp pci routines. -- Cort
5 #include <linux/kernel.h>
7 #include <linux/delay.h>
8 #include <linux/string.h>
9 #include <linux/init.h>
10 #include <linux/capability.h>
11 #include <linux/sched.h>
12 #include <linux/errno.h>
13 #include <linux/bootmem.h>
14 #include <linux/irq.h>
15 #include <linux/list.h>
17 #include <asm/processor.h>
20 #include <asm/sections.h>
21 #include <asm/pci-bridge.h>
22 #include <asm/byteorder.h>
23 #include <asm/uaccess.h>
24 #include <asm/machdep.h>
29 #define DBG(x...) printk(x)
34 unsigned long isa_io_base = 0;
35 unsigned long isa_mem_base = 0;
36 unsigned long pci_dram_offset = 0;
37 int pcibios_assign_bus_offset = 1;
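/*
 * isa_io_base is the kernel virtual base of the primary bridge's I/O space
 * and isa_mem_base the physical base of the ISA memory area (both filled
 * in from the bridge "ranges" below); pci_dram_offset is the bus-address
 * offset of system DRAM; pcibios_assign_bus_offset is the increment added
 * to a hose's last bus number to pick the next hose's first bus number
 * when we renumber buses ourselves.
 */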
39 void pcibios_make_OF_bus_map(void);
41 static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
42 static int probe_resource(struct pci_bus *parent, struct resource *pr,
43 struct resource *res, struct resource **conflict);
44 static void update_bridge_base(struct pci_bus *bus, int i);
45 static void pcibios_fixup_resources(struct pci_dev* dev);
46 static void fixup_broken_pcnet32(struct pci_dev* dev);
47 static int reparent_resources(struct resource *parent, struct resource *res);
48 static void fixup_cpc710_pci64(struct pci_dev* dev);
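/* Map from kernel-assigned PCI bus numbers to Open Firmware bus numbers,
 * built by pcibios_make_OF_bus_map() when renumbering (pci_assign_all_buses)
 * makes the two diverge; entries left at 0xff are unmapped. */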
50 static u8* pci_to_OF_bus_map;
53 /* By default, we don't re-assign bus numbers. We do this only on
56 int pci_assign_all_buses;
58 struct pci_controller* hose_head;
59 struct pci_controller** hose_tail = &hose_head;
61 static int pci_bus_count;
64 fixup_broken_pcnet32(struct pci_dev* dev)
66 if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
67 dev->vendor = PCI_VENDOR_ID_AMD;
68 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
71 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
74 fixup_cpc710_pci64(struct pci_dev* dev)
76 /* Hide the PCI64 BARs from the kernel as their content doesn't
77 * fit well in the resource management
79 dev->resource[0].start = dev->resource[0].end = 0;
80 dev->resource[0].flags = 0;
81 dev->resource[1].start = dev->resource[1].end = 0;
82 dev->resource[1].flags = 0;
84 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
87 pcibios_fixup_resources(struct pci_dev *dev)
89 struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
94 printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
97 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
98 struct resource *res = dev->resource + i;
101 if (res->end == 0xffffffff) {
102 DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
103 pci_name(dev), i, (u64)res->start, (u64)res->end);
104 res->end -= res->start;
106 res->flags |= IORESOURCE_UNSET;
110 if (res->flags & IORESOURCE_MEM) {
111 offset = hose->pci_mem_offset;
112 } else if (res->flags & IORESOURCE_IO) {
113 offset = (unsigned long) hose->io_base_virt
117 res->start += offset;
119 DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
120 i, res->flags, pci_name(dev),
121 (u64)res->start - offset, (u64)res->start);
125 /* Call machine specific resource fixup */
126 if (ppc_md.pcibios_fixup_resources)
127 ppc_md.pcibios_fixup_resources(dev);
129 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
131 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
132 struct resource *res)
134 unsigned long offset = 0;
135 struct pci_controller *hose = dev->sysdata;
137 if (hose && res->flags & IORESOURCE_IO)
138 offset = (unsigned long)hose->io_base_virt - isa_io_base;
139 else if (hose && res->flags & IORESOURCE_MEM)
140 offset = hose->pci_mem_offset;
141 region->start = res->start - offset;
142 region->end = res->end - offset;
144 EXPORT_SYMBOL(pcibios_resource_to_bus);
146 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
147 struct pci_bus_region *region)
149 unsigned long offset = 0;
150 struct pci_controller *hose = dev->sysdata;
152 if (hose && res->flags & IORESOURCE_IO)
153 offset = (unsigned long)hose->io_base_virt - isa_io_base;
154 else if (hose && res->flags & IORESOURCE_MEM)
155 offset = hose->pci_mem_offset;
156 res->start = region->start + offset;
157 res->end = region->end + offset;
159 EXPORT_SYMBOL(pcibios_bus_to_resource);
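/*
 * The two helpers above apply the same per-hose offset in opposite
 * directions: I/O resources differ from bus addresses by
 * (io_base_virt - isa_io_base), memory resources by pci_mem_offset.
 * For illustration (hypothetical value), with pci_mem_offset = 0x80000000
 * a resource at CPU address 0x90000000 corresponds to PCI bus address
 * 0x10000000, and pcibios_bus_to_resource() converts it back.
 */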
162 * We need to avoid collisions with `mirrored' VGA ports
163 * and other strange ISA hardware, so we always want the
164 * addresses to be allocated in the 0x000-0x0ff region
167 * Why? Because some silly external IO cards only decode
168 * the low 10 bits of the IO address. The 0x00-0xff region
169 * is reserved for motherboard devices that decode all 16
170 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
171 * but we want to try to avoid allocating at 0x2900-0x2bff
172 * which might be mirrored at 0x0100-0x03ff..
174 void pcibios_align_resource(void *data, struct resource *res,
175 resource_size_t size, resource_size_t align)
177 struct pci_dev *dev = data;
179 if (res->flags & IORESOURCE_IO) {
180 resource_size_t start = res->start;
183 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
184 " (%lld bytes)\n", pci_name(dev),
185 (int)(res - dev->resource), (unsigned long long)size);
189 start = (start + 0x3ff) & ~0x3ff;
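/* e.g. a start of 0x2900 is rounded up here to 0x2c00, skipping the
 * 0x100-0x3ff alias window within that 1KB-aligned block. */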
194 EXPORT_SYMBOL(pcibios_align_resource);
197 * Handle resources of PCI devices. If the world were perfect, we could
198 * just allocate all the resource regions and do nothing more. It isn't.
199 * On the other hand, we cannot just re-allocate all devices, as it would
200 * require us to know lots of host bridge internals. So we attempt to
201 * keep as much of the original configuration as possible, but tweak it
202 * when it's found to be wrong.
204 * Known BIOS problems we have to work around:
205 * - I/O or memory regions not configured
206 * - regions configured, but not enabled in the command register
207 * - bogus I/O addresses above 64K used
208 * - expansion ROMs left enabled (this may sound harmless, but given
209 * the fact that the PCI specs explicitly allow address decoders to be
210 * shared between expansion ROMs and other resource regions, it's
211 * at least dangerous)
214 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
215 * This gives us fixed barriers on where we can allocate.
216 * (2) Allocate resources for all enabled devices. If there is
217 * a collision, just mark the resource as unallocated. Also
218 * disable expansion ROMs during this step.
219 * (3) Try to allocate resources for disabled devices. If the
220 * resources were assigned correctly, everything goes well,
221 * if they weren't, they won't disturb allocation of other
223 * (4) Assign new addresses to resources which were either
224 * not configured at all or misconfigured. If explicitly
225 * requested by the user, configure expansion ROM address
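/* In pcibios_init() below, step (1) is pcibios_allocate_bus_resources(),
 * steps (2) and (3) are pcibios_allocate_resources(0) and (1) respectively,
 * and step (4) is pcibios_assign_resources(). */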
230 pcibios_allocate_bus_resources(struct list_head *bus_list)
234 struct resource *res, *pr;
236 /* Depth-First Search on bus tree */
237 list_for_each_entry(bus, bus_list, node) {
238 for (i = 0; i < 4; ++i) {
239 if ((res = bus->resource[i]) == NULL || !res->flags
240 || res->start > res->end)
242 if (bus->parent == NULL)
243 pr = (res->flags & IORESOURCE_IO)?
244 &ioport_resource: &iomem_resource;
246 pr = pci_find_parent_resource(bus->self, res);
248 /* this happens when the generic PCI
249 * code (wrongly) decides that this
250 * bridge is transparent -- paulus
256 DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
257 (u64)res->start, (u64)res->end, res->flags, pr);
259 if (request_resource(pr, res) == 0)
262 * Must be a conflict with an existing entry.
263 * Move that entry (or entries) under the
264 * bridge resource and try again.
266 if (reparent_resources(pr, res) == 0)
269 printk(KERN_ERR "PCI: Cannot allocate resource region "
270 "%d of PCI bridge %d\n", i, bus->number);
271 if (pci_relocate_bridge_resource(bus, i))
272 bus->resource[i] = NULL;
274 pcibios_allocate_bus_resources(&bus->children);
279 * Reparent resource children of pr that conflict with res
280 * under res, and make res replace those children.
283 reparent_resources(struct resource *parent, struct resource *res)
285 struct resource *p, **pp;
286 struct resource **firstpp = NULL;
288 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
289 if (p->end < res->start)
291 if (res->end < p->start)
293 if (p->start < res->start || p->end > res->end)
294 return -1; /* not completely contained */
299 return -1; /* didn't find any conflicting entries? */
300 res->parent = parent;
301 res->child = *firstpp;
305 for (p = res->child; p != NULL; p = p->sibling) {
307 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
308 p->name, (u64)p->start, (u64)p->end, res->name);
314 * A bridge has been allocated a range which is outside the range
315 * of its parent bridge, so it needs to be moved.
318 pci_relocate_bridge_resource(struct pci_bus *bus, int i)
320 struct resource *res, *pr, *conflict;
321 unsigned long try, size;
323 struct pci_bus *parent = bus->parent;
325 if (parent == NULL) {
326 /* shouldn't ever happen */
327 printk(KERN_ERR "PCI: can't move host bridge resource\n");
330 res = bus->resource[i];
334 for (j = 0; j < 4; j++) {
335 struct resource *r = parent->resource[j];
338 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
340 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
344 if (res->flags & IORESOURCE_PREFETCH)
349 size = res->end - res->start;
350 if (pr->start > pr->end || size > pr->end - pr->start)
354 res->start = try - size;
356 if (probe_resource(bus->parent, pr, res, &conflict) == 0)
358 if (conflict->start <= pr->start + size)
360 try = conflict->start - 1;
362 if (request_resource(pr, res)) {
363 DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
364 (u64)res->start, (u64)res->end);
365 return -1; /* "can't happen" */
367 update_bridge_base(bus, i);
368 printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
369 bus->number, i, (unsigned long long)res->start,
370 (unsigned long long)res->end);
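/* Check whether res would collide with anything already present under pr:
 * existing children of pr, window resources of sibling buses under parent,
 * and BARs of devices sitting directly on parent. The conflicting resource,
 * if any, is returned through *conflict. */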
375 probe_resource(struct pci_bus *parent, struct resource *pr,
376 struct resource *res, struct resource **conflict)
383 for (r = pr->child; r != NULL; r = r->sibling) {
384 if (r->end >= res->start && res->end >= r->start) {
389 list_for_each_entry(bus, &parent->children, node) {
390 for (i = 0; i < 4; ++i) {
391 if ((r = bus->resource[i]) == NULL)
393 if (!r->flags || r->start > r->end || r == res)
395 if (pci_find_parent_resource(bus->self, r) != pr)
397 if (r->end >= res->start && res->end >= r->start) {
403 list_for_each_entry(dev, &parent->devices, bus_list) {
404 for (i = 0; i < 6; ++i) {
405 r = &dev->resource[i];
406 if (!r->flags || (r->flags & IORESOURCE_UNSET))
408 if (pci_find_parent_resource(dev, r) != pr)
410 if (r->end >= res->start && res->end >= r->start) {
420 update_bridge_base(struct pci_bus *bus, int i)
422 struct resource *res = bus->resource[i];
423 u8 io_base_lo, io_limit_lo;
424 u16 mem_base, mem_limit;
426 unsigned long start, end, off;
427 struct pci_dev *dev = bus->self;
428 struct pci_controller *hose = dev->sysdata;
431 printk(KERN_ERR "update_bridge_base: no hose?\n");
434 pci_read_config_word(dev, PCI_COMMAND, &cmd);
435 pci_write_config_word(dev, PCI_COMMAND,
436 cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
437 if (res->flags & IORESOURCE_IO) {
438 off = (unsigned long) hose->io_base_virt - isa_io_base;
439 start = res->start - off;
440 end = res->end - off;
441 io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
442 io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
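/* PCI_IO_BASE/PCI_IO_LIMIT carry address bits 15:12 in their top nibble
 * (4KB granularity); e.g. a window of 0x2000-0x2fff yields base and limit
 * bytes of 0x20, the bridge filling in the low 12 bits itself. */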
444 io_base_lo |= PCI_IO_RANGE_TYPE_32;
446 io_base_lo |= PCI_IO_RANGE_TYPE_16;
447 pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
449 pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
451 pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
452 pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
454 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
456 off = hose->pci_mem_offset;
457 mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
458 mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
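/* PCI_MEMORY_BASE/LIMIT carry address bits 31:20 (1MB granularity);
 * e.g. a window of 0x80000000-0x8fffffff gives base 0x8000, limit 0x8ff0. */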
459 pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
460 pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
462 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
463 == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
464 off = hose->pci_mem_offset;
465 mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
466 mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
467 pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
468 pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
471 DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
472 pci_name(dev), i, res->flags);
474 pci_write_config_word(dev, PCI_COMMAND, cmd);
477 static inline void alloc_resource(struct pci_dev *dev, int idx)
479 struct resource *pr, *r = &dev->resource[idx];
481 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
482 pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
483 pr = pci_find_parent_resource(dev, r);
484 if (!pr || request_resource(pr, r) < 0) {
485 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
486 " of device %s\n", idx, pci_name(dev));
488 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
489 pr, (u64)pr->start, (u64)pr->end, pr->flags);
490 /* We'll assign a new address later */
491 r->flags |= IORESOURCE_UNSET;
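/* Pass 0 claims the resources of devices that are enabled in their command
 * register, pass 1 those of disabled devices (steps (2) and (3) of the
 * strategy above); anything that collides is left flagged IORESOURCE_UNSET
 * so pcibios_assign_resources() can give it a new address later. */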
498 pcibios_allocate_resources(int pass)
500 struct pci_dev *dev = NULL;
505 for_each_pci_dev(dev) {
506 pci_read_config_word(dev, PCI_COMMAND, &command);
507 for (idx = 0; idx < 6; idx++) {
508 r = &dev->resource[idx];
509 if (r->parent) /* Already allocated */
511 if (!r->flags || (r->flags & IORESOURCE_UNSET))
512 continue; /* Not assigned at all */
513 if (r->flags & IORESOURCE_IO)
514 disabled = !(command & PCI_COMMAND_IO);
516 disabled = !(command & PCI_COMMAND_MEMORY);
517 if (pass == disabled)
518 alloc_resource(dev, idx);
522 r = &dev->resource[PCI_ROM_RESOURCE];
523 if (r->flags & IORESOURCE_ROM_ENABLE) {
524 /* Turn the ROM off, leave the resource region, but keep it unregistered. */
526 DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
527 r->flags &= ~IORESOURCE_ROM_ENABLE;
528 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
529 pci_write_config_dword(dev, dev->rom_base_reg,
530 reg & ~PCI_ROM_ADDRESS_ENABLE);
536 pcibios_assign_resources(void)
538 struct pci_dev *dev = NULL;
542 for_each_pci_dev(dev) {
543 int class = dev->class >> 8;
545 /* Don't touch classless devices and host bridges */
546 if (!class || class == PCI_CLASS_BRIDGE_HOST)
549 for (idx = 0; idx < 6; idx++) {
550 r = &dev->resource[idx];
553 * We shall assign a new address to this resource,
554 * either because the BIOS (sic) forgot to do so
555 * or because we have decided the old address was
556 * unusable for some reason.
558 if ((r->flags & IORESOURCE_UNSET) && r->end &&
559 (!ppc_md.pcibios_enable_device_hook ||
560 !ppc_md.pcibios_enable_device_hook(dev, 1))) {
561 r->flags &= ~IORESOURCE_UNSET;
562 pci_assign_resource(dev, idx);
566 #if 0 /* don't assign ROMs */
567 r = &dev->resource[PCI_ROM_RESOURCE];
571 pci_assign_resource(dev, PCI_ROM_RESOURCE);
578 pcibios_enable_resources(struct pci_dev *dev, int mask)
584 pci_read_config_word(dev, PCI_COMMAND, &cmd);
586 for (idx=0; idx<6; idx++) {
587 /* Only set up the requested stuff */
588 if (!(mask & (1<<idx)))
591 r = &dev->resource[idx];
592 if (r->flags & IORESOURCE_UNSET) {
593 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
596 if (r->flags & IORESOURCE_IO)
597 cmd |= PCI_COMMAND_IO;
598 if (r->flags & IORESOURCE_MEM)
599 cmd |= PCI_COMMAND_MEMORY;
601 if (dev->resource[PCI_ROM_RESOURCE].start)
602 cmd |= PCI_COMMAND_MEMORY;
603 if (cmd != old_cmd) {
604 printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
605 pci_write_config_word(dev, PCI_COMMAND, cmd);
610 static int next_controller_index;
612 struct pci_controller * __init
613 pcibios_alloc_controller(void)
615 struct pci_controller *hose;
617 hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
618 memset(hose, 0, sizeof(struct pci_controller));
621 hose_tail = &hose->next;
623 hose->index = next_controller_index++;
630 * Functions below are used on OpenFirmware machines.
633 make_one_node_map(struct device_node* node, u8 pci_bus)
635 const int *bus_range;
638 if (pci_bus >= pci_bus_count)
640 bus_range = of_get_property(node, "bus-range", &len);
641 if (bus_range == NULL || len < 2 * sizeof(int)) {
642 printk(KERN_WARNING "Can't get bus-range for %s, "
643 "assuming it starts at 0\n", node->full_name);
644 pci_to_OF_bus_map[pci_bus] = 0;
646 pci_to_OF_bus_map[pci_bus] = bus_range[0];
648 for (node = node->child; node != NULL; node = node->sibling) {
650 const unsigned int *class_code, *reg;
652 class_code = of_get_property(node, "class-code", NULL);
653 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
654 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
656 reg = of_get_property(node, "reg", NULL);
659 dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
660 if (!dev || !dev->subordinate) {
664 make_one_node_map(node, dev->subordinate->number);
670 pcibios_make_OF_bus_map(void)
673 struct pci_controller* hose;
674 struct property *map_prop;
675 struct device_node *dn;
677 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
678 if (!pci_to_OF_bus_map) {
679 printk(KERN_ERR "Can't allocate OF bus map !\n");
683 /* We fill the bus map with invalid values; that helps
686 for (i=0; i<pci_bus_count; i++)
687 pci_to_OF_bus_map[i] = 0xff;
689 /* For each hose, we begin searching bridges */
690 for(hose=hose_head; hose; hose=hose->next) {
691 struct device_node* node;
692 node = (struct device_node *)hose->arch_data;
695 make_one_node_map(node, hose->first_busno);
697 dn = of_find_node_by_path("/");
698 map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
700 BUG_ON(pci_bus_count > map_prop->length);
701 memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
705 printk("PCI->OF bus map:\n");
706 for (i=0; i<pci_bus_count; i++) {
707 if (pci_to_OF_bus_map[i] == 0xff)
709 printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
714 typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
716 static struct device_node*
717 scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
719 struct device_node* sub_node;
721 for (; node != NULL; node = node->sibling) {
722 const unsigned int *class_code;
724 if (filter(node, data))
727 /* For PCI<->PCI bridges or CardBus bridges, we go down
728 * Note: some OFs create a parent node "multifunc-device" as
729 * a fake root for all functions of a multi-function device,
730 * we go down them as well.
732 class_code = of_get_property(node, "class-code", NULL);
733 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
734 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
735 strcmp(node->name, "multifunc-device"))
737 sub_node = scan_OF_pci_childs(node->child, filter, data);
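/* The first "reg" cell of a PCI node (phys.hi in the OF PCI bus binding)
 * packs the config address: bits 23:16 are the bus number and bits 15:8
 * the devfn, hence the (reg[0] >> 8) & 0xff extraction used below and in
 * pci_device_from_OF_node(). */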
744 static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
747 struct device_node *np = NULL;
751 while ((np = of_get_next_child(parent, np)) != NULL) {
752 reg = of_get_property(np, "reg", &psize);
753 if (reg == NULL || psize < 4)
755 if (((reg[0] >> 8) & 0xff) == devfn)
762 static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
764 struct device_node *parent, *np;
766 /* Are we a root bus ? */
767 if (bus->self == NULL || bus->parent == NULL) {
768 struct pci_controller *hose = pci_bus_to_hose(bus->number);
771 return of_node_get(hose->arch_data);
774 /* not a root bus, we need to get our parent */
775 parent = scan_OF_for_pci_bus(bus->parent);
779 /* now iterate for children for a match */
780 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
787 * Scans the OF tree for a device node matching a PCI device
790 pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
792 struct device_node *parent, *np;
797 DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
798 parent = scan_OF_for_pci_bus(bus);
801 DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
802 np = scan_OF_for_pci_dev(parent, devfn);
804 DBG(" result is %s\n", np ? np->full_name : "<NULL>");
806 /* XXX most callers don't release the returned node
807 * mostly because ppc64 doesn't increase the refcount,
808 * we need to fix that.
812 EXPORT_SYMBOL(pci_busdev_to_OF_node);
815 pci_device_to_OF_node(struct pci_dev *dev)
817 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
819 EXPORT_SYMBOL(pci_device_to_OF_node);
821 /* This routine is meant to be used early during boot, when the
822 * PCI bus numbers have not yet been assigned, and you need to
823 * issue PCI config cycles to an OF device.
824 * It could also be used to "fix" RTAS config cycles if you want
825 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
828 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
833 struct pci_controller* hose;
834 for (hose=hose_head;hose;hose=hose->next)
835 if (hose->arch_data == node)
843 find_OF_pci_device_filter(struct device_node* node, void* data)
845 return ((void *)node == data);
849 * Returns the PCI device matching a given OF node
852 pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
854 const unsigned int *reg;
855 struct pci_controller* hose;
856 struct pci_dev* dev = NULL;
860 /* Make sure it's really a PCI device */
861 hose = pci_find_hose_for_OF_device(node);
862 if (!hose || !hose->arch_data)
864 if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
865 find_OF_pci_device_filter, (void *)node))
867 reg = of_get_property(node, "reg", NULL);
870 *bus = (reg[0] >> 16) & 0xff;
871 *devfn = ((reg[0] >> 8) & 0xff);
873 /* Ok, here we need some tweaking. If we have already renumbered
874 * all buses, we can't rely on the OF bus number any more.
875 * The pci_to_OF_bus_map is not enough, as several PCI buses
876 * may match the same OF bus number.
878 if (!pci_to_OF_bus_map)
881 for_each_pci_dev(dev)
882 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
883 dev->devfn == *devfn) {
884 *bus = dev->bus->number;
891 EXPORT_SYMBOL(pci_device_from_OF_node);
894 pci_process_bridge_OF_ranges(struct pci_controller *hose,
895 struct device_node *dev, int primary)
897 static unsigned int static_lc_ranges[256] __initdata;
898 const unsigned int *dt_ranges;
899 unsigned int *lc_ranges, *ranges, *prev, size;
900 int rlen = 0, orig_rlen;
902 struct resource *res;
903 int np, na = of_n_addr_cells(dev);
906 /* First we try to merge ranges to fix a problem with some pmacs
907 * that can have more than 3 ranges, fortunately using contiguous
910 dt_ranges = of_get_property(dev, "ranges", &rlen);
913 /* Sanity check, though hopefully that never happens */
914 if (rlen > sizeof(static_lc_ranges)) {
915 printk(KERN_WARNING "OF ranges property too large !\n");
916 rlen = sizeof(static_lc_ranges);
918 lc_ranges = static_lc_ranges;
919 memcpy(lc_ranges, dt_ranges, rlen);
922 /* Let's work on a copy of the "ranges" property instead of damaging
923 * the device-tree image in memory
927 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
929 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
930 (prev[2] + prev[na+4]) == ranges[2] &&
931 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
932 prev[na+4] += ranges[na+4];
943 * The ranges property is laid out as an array of elements,
944 * each of which comprises:
945 * cells 0 - 2: a PCI address
946 * cells 3 or 3+4: a CPU physical address
947 * (size depending on dev->n_addr_cells)
948 * cells 4+5 or 5+6: the size of the range
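 *
 * For illustration (hypothetical values), a one-address-cell I/O entry of
 *   0x01000000 0x0 0x0  0xf2000000  0x0 0x01000000
 * means: space code 01 (I/O) in bits 25:24 of cell 0, PCI address 0,
 * CPU physical address 0xf2000000, size 16MB. Bit 30 of cell 0
 * (0x40000000) marks a prefetchable memory range, as tested below.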
952 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
955 switch ((ranges[0] >> 24) & 0x3) {
956 case 1: /* I/O space */
959 hose->io_base_phys = ranges[na+2];
960 /* limit I/O space to 16MB */
961 if (size > 0x01000000)
963 hose->io_base_virt = ioremap(ranges[na+2], size);
965 isa_io_base = (unsigned long) hose->io_base_virt;
966 res = &hose->io_resource;
967 res->flags = IORESOURCE_IO;
968 res->start = ranges[2];
969 DBG("PCI: IO 0x%llx -> 0x%llx\n",
970 (u64)res->start, (u64)res->start + size - 1);
972 case 2: /* memory space */
974 if (ranges[1] == 0 && ranges[2] == 0
975 && ranges[na+4] <= (16 << 20)) {
976 /* 1st 16MB, i.e. ISA memory area */
978 isa_mem_base = ranges[na+2];
981 while (memno < 3 && hose->mem_resources[memno].flags)
984 hose->pci_mem_offset = ranges[na+2] - ranges[2];
986 res = &hose->mem_resources[memno];
987 res->flags = IORESOURCE_MEM;
988 if (ranges[0] & 0x40000000)
989 res->flags |= IORESOURCE_PREFETCH;
990 res->start = ranges[na+2];
991 DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
992 (u64)res->start, (u64)res->start + size - 1);
997 res->name = dev->full_name;
998 res->end = res->start + size - 1;
1000 res->sibling = NULL;
1007 /* We create the "pci-OF-bus-map" property now so it appears in the
1011 pci_create_OF_bus_map(void)
1013 struct property* of_prop;
1014 struct device_node *dn;
1016 of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
1019 dn = of_find_node_by_path("/");
1021 memset(of_prop, -1, sizeof(struct property) + 256);
1022 of_prop->name = "pci-OF-bus-map";
1023 of_prop->length = 256;
1024 of_prop->value = &of_prop[1];
1025 prom_add_property(dn, of_prop);
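/* The 256-byte property is pre-filled with 0xff ("unmapped") here and is
 * overwritten with the real kernel->OF bus map by pcibios_make_OF_bus_map()
 * once the buses have been scanned and renumbered. */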
1030 static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1032 struct pci_dev *pdev;
1033 struct device_node *np;
1035 pdev = to_pci_dev (dev);
1036 np = pci_device_to_OF_node(pdev);
1037 if (np == NULL || np->full_name == NULL)
1039 return sprintf(buf, "%s", np->full_name);
1041 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1043 #else /* CONFIG_PPC_OF */
1044 void pcibios_make_OF_bus_map(void)
1047 #endif /* CONFIG_PPC_OF */
1049 /* Add sysfs properties */
1050 void pcibios_add_platform_entries(struct pci_dev *pdev)
1052 #ifdef CONFIG_PPC_OF
1053 device_create_file(&pdev->dev, &dev_attr_devspec);
1054 #endif /* CONFIG_PPC_OF */
1058 #ifdef CONFIG_PPC_PMAC
1060 * This set of routines checks for PCI<->PCI bridges that have closed
1061 * IO resources and have child devices. It tries to re-open an IO
1064 * This is a _temporary_ fix to work around a problem with Apple's OF
1065 * closing IO windows on P2P bridges when the OF drivers of cards
1066 * below this bridge don't claim any IO range (typically ATI or
1069 * A more complete fix would be to use drivers/pci/setup-bus.c, which
1070 * involves a working pcibios_fixup_pbus_ranges(), some more care about
1071 * ordering when creating the host bus resources, and maybe a few more
1075 /* Initialize bridges with base/limit values we have collected */
1077 do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1079 struct pci_dev *bridge = bus->self;
1080 struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1083 struct resource res;
1085 if (bus->resource[0] == NULL)
1087 res = *(bus->resource[0]);
1089 DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1090 res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1091 res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1092 DBG(" IO window: %016llx-%016llx\n", (u64)res.start, (u64)res.end);
1094 /* Set up the top and bottom of the PCI I/O segment for this bus. */
1095 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1097 l |= (res.start >> 8) & 0x00f0;
1098 l |= res.end & 0xf000;
1099 pci_write_config_dword(bridge, PCI_IO_BASE, l);
1101 if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1102 l = (res.start >> 16) | (res.end & 0xffff0000);
1103 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1106 pci_read_config_word(bridge, PCI_COMMAND, &w);
1107 w |= PCI_COMMAND_IO;
1108 pci_write_config_word(bridge, PCI_COMMAND, w);
1110 #if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1112 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1113 w |= PCI_BRIDGE_CTL_VGA;
1114 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1119 /* This function is pretty basic and actually quite broken for the
1120 * general case, but it's enough for us right now. It's supposed
1121 * to tell us if we need to open an IO range at all or not and what
1125 check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1127 struct pci_dev *dev;
1131 #define push_end(res, mask) do { \
1132 BUG_ON((mask+1) & mask); \
1133 res->end = (res->end + mask) | mask; \
1136 list_for_each_entry(dev, &bus->devices, bus_list) {
1137 u16 class = dev->class >> 8;
1139 if (class == PCI_CLASS_DISPLAY_VGA ||
1140 class == PCI_CLASS_NOT_DEFINED_VGA)
1142 if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1143 rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1144 if (class == PCI_CLASS_BRIDGE_CARDBUS)
1145 push_end(res, 0xfff);
1147 for (i=0; i<PCI_NUM_RESOURCES; i++) {
1149 unsigned long r_size;
1151 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1152 && i >= PCI_BRIDGE_RESOURCES)
1154 r = &dev->resource[i];
1155 r_size = r->end - r->start;
1158 if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1160 push_end(res, r_size);
1168 /* Here we scan all P2P bridges of a given level that have a closed
1169 * IO window. Note that the test for the presence of a VGA card should
1170 * be improved to take into account already configured P2P bridges;
1171 * currently, we don't see them and might end up configuring two bridges
1172 * with VGA pass through enabled
1175 do_fixup_p2p_level(struct pci_bus *bus)
1181 for (parent_io=0; parent_io<4; parent_io++)
1182 if (bus->resource[parent_io]
1183 && bus->resource[parent_io]->flags & IORESOURCE_IO)
1188 list_for_each_entry(b, &bus->children, node) {
1189 struct pci_dev *d = b->self;
1190 struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1191 struct resource *res = b->resource[0];
1192 struct resource tmp_res;
1196 memset(&tmp_res, 0, sizeof(tmp_res));
1197 tmp_res.start = bus->resource[parent_io]->start;
1199 /* We don't let low addresses go through that closed P2P bridge, well,
1200 * that may not be necessary but I feel safer that way
1202 if (tmp_res.start == 0)
1203 tmp_res.start = 0x1000;
1205 if (!list_empty(&b->devices) && res && res->flags == 0 &&
1206 res != bus->resource[parent_io] &&
1207 (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1208 check_for_io_childs(b, &tmp_res, &found_vga)) {
1211 printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1215 printk(KERN_WARNING "Skipping VGA, already active"
1216 " on bus segment\n");
1221 pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1223 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1224 max = ((unsigned long) hose->io_base_virt
1225 - isa_io_base) + 0xffffffff;
1227 max = ((unsigned long) hose->io_base_virt
1228 - isa_io_base) + 0xffff;
1231 res->flags = IORESOURCE_IO;
1232 res->name = b->name;
1234 /* Find a resource in the parent where we can allocate */
1235 for (i = 0 ; i < 4; i++) {
1236 struct resource *r = bus->resource[i];
1239 if ((r->flags & IORESOURCE_IO) == 0)
1241 DBG("Trying to allocate from %016llx, size %016llx from parent"
1242 " res %d: %016llx -> %016llx\n",
1243 (u64)res->start, (u64)res->end, i, (u64)r->start, (u64)r->end);
1245 if (allocate_resource(r, res, res->end + 1, res->start, max,
1246 res->end + 1, NULL, NULL) < 0) {
1250 do_update_p2p_io_resource(b, found_vga);
1254 do_fixup_p2p_level(b);
1259 pcibios_fixup_p2p_bridges(void)
1263 list_for_each_entry(b, &pci_root_buses, node)
1264 do_fixup_p2p_level(b);
1267 #endif /* CONFIG_PPC_PMAC */
1272 struct pci_controller *hose;
1273 struct pci_bus *bus;
1276 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1278 /* Scan all of the recorded PCI controllers. */
1279 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1280 if (pci_assign_all_buses)
1281 hose->first_busno = next_busno;
1282 hose->last_busno = 0xff;
1283 bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
1286 pci_bus_add_devices(bus);
1287 hose->last_busno = bus->subordinate;
1288 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1289 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1291 pci_bus_count = next_busno;
1293 /* OpenFirmware based machines need a map of OF bus
1294 * numbers vs. kernel bus numbers since we may have to
1297 if (pci_assign_all_buses && have_of)
1298 pcibios_make_OF_bus_map();
1300 /* Call machine dependent fixup */
1301 if (ppc_md.pcibios_fixup)
1302 ppc_md.pcibios_fixup();
1304 /* Allocate and assign resources */
1305 pcibios_allocate_bus_resources(&pci_root_buses);
1306 pcibios_allocate_resources(0);
1307 pcibios_allocate_resources(1);
1308 #ifdef CONFIG_PPC_PMAC
1309 pcibios_fixup_p2p_bridges();
1310 #endif /* CONFIG_PPC_PMAC */
1311 pcibios_assign_resources();
1313 /* Call machine dependent post-init code */
1314 if (ppc_md.pcibios_after_init)
1315 ppc_md.pcibios_after_init();
1320 subsys_initcall(pcibios_init);
1322 unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1323 unsigned long start, unsigned long size)
1328 void __init pcibios_fixup_bus(struct pci_bus *bus)
1330 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1331 unsigned long io_offset;
1332 struct resource *res;
1333 struct pci_dev *dev;
1336 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1337 if (bus->parent == NULL) {
1338 /* This is a host bridge - fill in its resources */
1341 bus->resource[0] = res = &hose->io_resource;
1344 printk(KERN_ERR "I/O resource not set for host"
1345 " bridge %d\n", hose->index);
1347 res->end = IO_SPACE_LIMIT;
1348 res->flags = IORESOURCE_IO;
1350 res->start += io_offset;
1351 res->end += io_offset;
1353 for (i = 0; i < 3; ++i) {
1354 res = &hose->mem_resources[i];
1358 printk(KERN_ERR "Memory resource not set for "
1359 "host bridge %d\n", hose->index);
1360 res->start = hose->pci_mem_offset;
1362 res->flags = IORESOURCE_MEM;
1364 bus->resource[i+1] = res;
1367 /* This is a subordinate bridge */
1368 pci_read_bridge_bases(bus);
1370 for (i = 0; i < 4; ++i) {
1371 if ((res = bus->resource[i]) == NULL)
1375 if (io_offset && (res->flags & IORESOURCE_IO)) {
1376 res->start += io_offset;
1377 res->end += io_offset;
1378 } else if (hose->pci_mem_offset
1379 && (res->flags & IORESOURCE_MEM)) {
1380 res->start += hose->pci_mem_offset;
1381 res->end += hose->pci_mem_offset;
1386 /* Platform specific bus fixups */
1387 if (ppc_md.pcibios_fixup_bus)
1388 ppc_md.pcibios_fixup_bus(bus);
1390 /* Read default IRQs and fixup if necessary */
1391 list_for_each_entry(dev, &bus->devices, bus_list) {
1392 pci_read_irq_line(dev);
1393 if (ppc_md.pci_irq_fixup)
1394 ppc_md.pci_irq_fixup(dev);
1398 char __init *pcibios_setup(char *str)
1403 /* the next one is stolen from the alpha port... */
1405 pcibios_update_irq(struct pci_dev *dev, int irq)
1407 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
1408 /* XXX FIXME - update OF device tree node interrupt property */
1411 #ifdef CONFIG_PPC_MERGE
1412 /* XXX This is a copy of the ppc64 version. This is temporary until we start
1413 * merging the 2 PCI layers
1416 * Reads the interrupt pin to determine if an interrupt is used by the card.
1417 * If the interrupt is used, then gets the interrupt line from
1418 * Open Firmware and sets it in the pci_dev and pci_config line.
1420 int pci_read_irq_line(struct pci_dev *pci_dev)
1425 DBG("Try to map irq for %s...\n", pci_name(pci_dev));
1427 /* Try to get a mapping from the device-tree */
1428 if (of_irq_map_pci(pci_dev, &oirq)) {
1431 /* If that fails, lets fallback to what is in the config
1432 * space and map that through the default controller. We
1433 * also set the type to level low since that's what PCI
1434 * interrupts are. If your platform does differently, then
1435 * either provide a proper interrupt tree or don't use this
1438 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
1442 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
1446 DBG(" -> no map ! Using irq line %d from PCI config\n", line);
1448 virq = irq_create_mapping(NULL, line);
1450 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
1452 DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
1453 oirq.size, oirq.specifier[0], oirq.controller->full_name);
1455 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
1458 if (virq == NO_IRQ) {
1459 DBG(" -> failed to map !\n");
1462 pci_dev->irq = virq;
1466 EXPORT_SYMBOL(pci_read_irq_line);
1467 #endif /* CONFIG_PPC_MERGE */
1469 int pcibios_enable_device(struct pci_dev *dev, int mask)
1475 if (ppc_md.pcibios_enable_device_hook)
1476 if (ppc_md.pcibios_enable_device_hook(dev, 0))
1479 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1481 for (idx=0; idx<6; idx++) {
1482 r = &dev->resource[idx];
1483 if (r->flags & IORESOURCE_UNSET) {
1484 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1487 if (r->flags & IORESOURCE_IO)
1488 cmd |= PCI_COMMAND_IO;
1489 if (r->flags & IORESOURCE_MEM)
1490 cmd |= PCI_COMMAND_MEMORY;
1492 if (cmd != old_cmd) {
1493 printk("PCI: Enabling device %s (%04x -> %04x)\n",
1494 pci_name(dev), old_cmd, cmd);
1495 pci_write_config_word(dev, PCI_COMMAND, cmd);
1500 struct pci_controller*
1501 pci_bus_to_hose(int bus)
1503 struct pci_controller* hose = hose_head;
1505 for (; hose; hose = hose->next)
1506 if (bus >= hose->first_busno && bus <= hose->last_busno)
1512 pci_bus_io_base(unsigned int bus)
1514 struct pci_controller *hose;
1516 hose = pci_bus_to_hose(bus);
1519 return hose->io_base_virt;
1523 pci_bus_io_base_phys(unsigned int bus)
1525 struct pci_controller *hose;
1527 hose = pci_bus_to_hose(bus);
1530 return hose->io_base_phys;
1534 pci_bus_mem_base_phys(unsigned int bus)
1536 struct pci_controller *hose;
1538 hose = pci_bus_to_hose(bus);
1541 return hose->pci_mem_offset;
1545 pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1547 /* Hack alert again ! See comments in chrp_pci.c
1549 struct pci_controller* hose =
1550 (struct pci_controller *)pdev->sysdata;
1551 if (hose && res->flags & IORESOURCE_MEM)
1552 return res->start - hose->pci_mem_offset;
1553 /* We may want to do something with IOs here... */
1558 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1559 resource_size_t *offset,
1560 enum pci_mmap_state mmap_state)
1562 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1563 unsigned long io_offset = 0;
1567 return NULL; /* should never happen */
1569 /* If memory, add on the PCI bridge address offset */
1570 if (mmap_state == pci_mmap_mem) {
1571 #if 0 /* See comment in pci_resource_to_user() for why this is disabled */
1572 *offset += hose->pci_mem_offset;
1574 res_bit = IORESOURCE_MEM;
1576 io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
1577 *offset += io_offset;
1578 res_bit = IORESOURCE_IO;
1582 * Check that the offset requested corresponds to one of the
1583 * resources of the device.
1585 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1586 struct resource *rp = &dev->resource[i];
1587 int flags = rp->flags;
1589 /* treat ROM as memory (should be already) */
1590 if (i == PCI_ROM_RESOURCE)
1591 flags |= IORESOURCE_MEM;
1593 /* Active and same type? */
1594 if ((flags & res_bit) == 0)
1597 /* In the range of this resource? */
1598 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
1601 /* found it! construct the final physical address */
1602 if (mmap_state == pci_mmap_io)
1603 *offset += hose->io_base_phys - io_offset;
1611 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1614 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1615 pgprot_t protection,
1616 enum pci_mmap_state mmap_state,
1619 unsigned long prot = pgprot_val(protection);
1621 /* Write combine is always 0 on non-memory space mappings. On
1622 * memory space, if the user didn't pass 1, we check for a
1623 * "prefetchable" resource. This is a bit hackish, but we use
1624 * this to work around the inability of /sysfs to provide a write
1627 if (mmap_state != pci_mmap_mem)
1629 else if (write_combine == 0) {
1630 if (rp->flags & IORESOURCE_PREFETCH)
1634 /* XXX would be nice to have a way to ask for write-through */
1635 prot |= _PAGE_NO_CACHE;
1637 prot &= ~_PAGE_GUARDED;
1639 prot |= _PAGE_GUARDED;
1641 return __pgprot(prot);
1645 * This one is used by /dev/mem and fbdev, which have no clue about the
1646 * PCI device; it tries to find the PCI device first and calls the
1649 pgprot_t pci_phys_mem_access_prot(struct file *file,
1652 pgprot_t protection)
1654 struct pci_dev *pdev = NULL;
1655 struct resource *found = NULL;
1656 unsigned long prot = pgprot_val(protection);
1657 unsigned long offset = pfn << PAGE_SHIFT;
1660 if (page_is_ram(pfn))
1663 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
1665 for_each_pci_dev(pdev) {
1666 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1667 struct resource *rp = &pdev->resource[i];
1668 int flags = rp->flags;
1670 /* Active and same type? */
1671 if ((flags & IORESOURCE_MEM) == 0)
1673 /* In the range of this resource? */
1674 if (offset < (rp->start & PAGE_MASK) ||
1684 if (found->flags & IORESOURCE_PREFETCH)
1685 prot &= ~_PAGE_GUARDED;
1689 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
1691 return __pgprot(prot);
1696 * Perform the actual remap of the pages for a PCI device mapping, as
1697 * appropriate for this architecture. The region in the process to map
1698 * is described by vm_start and vm_end members of VMA, the base physical
1699 * address is found in vm_pgoff.
1700 * The pci device structure is provided so that architectures may make mapping
1701 * decisions on a per-device or per-bus basis.
1703 * Returns a negative error code on failure, zero on success.
1705 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1706 enum pci_mmap_state mmap_state,
1709 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
1710 struct resource *rp;
1713 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
1717 vma->vm_pgoff = offset >> PAGE_SHIFT;
1718 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1720 mmap_state, write_combine);
1722 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1723 vma->vm_end - vma->vm_start, vma->vm_page_prot);
1728 /* Obsolete functions. Should be removed once the symbios driver
1732 phys_to_bus(unsigned long pa)
1734 struct pci_controller *hose;
1737 for (hose = hose_head; hose; hose = hose->next) {
1738 for (i = 0; i < 3; ++i) {
1739 if (pa >= hose->mem_resources[i].start
1740 && pa <= hose->mem_resources[i].end) {
1742 * XXX the hose->pci_mem_offset really
1743 * only applies to mem_resources[0].
1744 * We need a way to store an offset for
1745 * the others. -- paulus
1748 pa -= hose->pci_mem_offset;
1753 /* hmmm, didn't find it */
1758 pci_phys_to_bus(unsigned long pa, int busnr)
1760 struct pci_controller* hose = pci_bus_to_hose(busnr);
1763 return pa - hose->pci_mem_offset;
1767 pci_bus_to_phys(unsigned int ba, int busnr)
1769 struct pci_controller* hose = pci_bus_to_hose(busnr);
1772 return ba + hose->pci_mem_offset;
1775 /* Provide information on locations of various I/O regions in physical
1776 * memory. Do this on a per-card basis so that we choose the right
1778 * Note that the returned IO or memory base is a physical address
1781 long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1783 struct pci_controller* hose;
1784 long result = -EOPNOTSUPP;
1786 /* Argh ! Please forgive me for that hack, but that's the
1787 * simplest way to get existing XFree to not lockup on some
1788 * G5 machines... So when something asks for bus 0 io base
1789 * (bus 0 is HT root), we return the AGP one instead.
1791 #ifdef CONFIG_PPC_PMAC
1792 if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1795 #endif /* CONFIG_PPC_PMAC */
1797 hose = pci_bus_to_hose(bus);
1802 case IOBASE_BRIDGE_NUMBER:
1803 return (long)hose->first_busno;
1805 return (long)hose->pci_mem_offset;
1807 return (long)hose->io_base_phys;
1809 return (long)isa_io_base;
1810 case IOBASE_ISA_MEM:
1811 return (long)isa_mem_base;
1817 void pci_resource_to_user(const struct pci_dev *dev, int bar,
1818 const struct resource *rsrc,
1819 resource_size_t *start, resource_size_t *end)
1821 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1822 resource_size_t offset = 0;
1827 if (rsrc->flags & IORESOURCE_IO)
1828 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1830 /* We pass a fully fixed up address to userland for MMIO instead of
1831 * a BAR value because X is lame and expects to be able to use that
1832 * to pass to /dev/mem !
1834 * That means that we'll have potentially 64 bits values where some
1835 * userland apps only expect 32 (like X itself since it thinks only
1836 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
1839 * Hopefully, the sysfs interface is immune to that gunk. Once X
1840 * has been fixed (and the fix spread enough), we can re-enable the
1841 * 2 lines below and pass down a BAR value to userland. In that case
1842 * we'll also have to re-enable the matching code in
1843 * __pci_mmap_make_offset().
1848 else if (rsrc->flags & IORESOURCE_MEM)
1849 offset = hose->pci_mem_offset;
1852 *start = rsrc->start - offset;
1853 *end = rsrc->end - offset;
1856 void __init pci_init_resource(struct resource *res, resource_size_t start,
1857 resource_size_t end, int flags, char *name)
1864 res->sibling = NULL;
1868 unsigned long pci_address_to_pio(phys_addr_t address)
1870 struct pci_controller* hose = hose_head;
1872 for (; hose; hose = hose->next) {
1873 unsigned int size = hose->io_resource.end -
1874 hose->io_resource.start + 1;
1875 if (address >= hose->io_base_phys &&
1876 address < (hose->io_base_phys + size)) {
1877 unsigned long base =
1878 (unsigned long)hose->io_base_virt - _IO_BASE;
1879 return base + (address - hose->io_base_phys);
1882 return (unsigned long)-1;
1884 EXPORT_SYMBOL(pci_address_to_pio);
1887 * Null PCI config access functions, for the case when we can't
1890 #define NULL_PCI_OP(rw, size, type) \
1892 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1894 return PCIBIOS_DEVICE_NOT_FOUND; \
1898 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1901 return PCIBIOS_DEVICE_NOT_FOUND;
1905 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1908 return PCIBIOS_DEVICE_NOT_FOUND;
1911 static struct pci_ops null_pci_ops =
1918 * These functions are used early on before PCI scanning is done
1919 * and all of the pci_dev and pci_bus structures have been created.
1921 static struct pci_bus *
1922 fake_pci_bus(struct pci_controller *hose, int busnr)
1924 static struct pci_bus bus;
1927 hose = pci_bus_to_hose(busnr);
1929 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1933 bus.ops = hose ? hose->ops : &null_pci_ops;
1937 #define EARLY_PCI_OP(rw, size, type) \
1938 int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1939 int devfn, int offset, type value) \
1941 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1942 devfn, offset, value); \
1945 EARLY_PCI_OP(read, byte, u8 *)
1946 EARLY_PCI_OP(read, word, u16 *)
1947 EARLY_PCI_OP(read, dword, u32 *)
1948 EARLY_PCI_OP(write, byte, u8)
1949 EARLY_PCI_OP(write, word, u16)
1950 EARLY_PCI_OP(write, dword, u32)
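/* Typical early-boot usage, before any struct pci_bus exists (illustrative):
 *
 *	u16 vendor;
 *	early_read_config_word(hose, hose->first_busno, PCI_DEVFN(0, 0),
 *			       PCI_VENDOR_ID, &vendor);
 */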