2 * Procedures for interfacing to Open Firmware.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/proc_fs.h>
28 #include <linux/stringify.h>
29 #include <linux/delay.h>
30 #include <linux/initrd.h>
31 #include <linux/bitops.h>
35 #include <asm/processor.h>
39 #include <asm/system.h>
41 #include <asm/pgtable.h>
43 #include <asm/iommu.h>
44 #include <asm/btext.h>
45 #include <asm/sections.h>
46 #include <asm/machdep.h>
48 #ifdef CONFIG_LOGO_LINUX_CLUT224
49 #include <linux/linux_logo.h>
50 extern const struct linux_logo logo_linux_clut224;
54 * Properties whose value is longer than this get excluded from our
55 * copy of the device tree. This value does need to be big enough to
56 * ensure that we don't lose things like the interrupt-map property
57 * on a PCI-PCI bridge.
59 #define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
62 * Eventually bump that one up
64 #define DEVTREE_CHUNK_SIZE 0x100000
67 * This is the size of the local memory reserve map that gets copied
68 * into the boot params passed to the kernel. That size is totally
69 * flexible as the kernel just reads the list until it encounters an
70 * entry with size 0, so it can be changed without breaking binary compatibility.
73 #define MEM_RESERVE_MAP_SIZE 8
76 * prom_init() is called very early on, before the kernel text
77 * and data have been mapped to KERNELBASE. At this point the code
78 * is running at whatever address it has been loaded at.
79 * On ppc32 we compile with -mrelocatable, which means that references
80 * to extern and static variables get relocated automatically.
81 * On ppc64 we have to relocate the references explicitly with
82 * RELOC. (Note that strings count as static variables.)
84 * Because OF may have mapped I/O devices into the area starting at
85 * KERNELBASE, particularly on CHRP machines, we can't safely call
86 * OF once the kernel has been mapped to KERNELBASE. Therefore all
87 * OF calls must be done within prom_init().
89 * ADDR is used in calls to call_prom. The 4th and following
90 * arguments to call_prom should be 32-bit values.
91 * On ppc64, 64 bit values are truncated to 32 bits (and
92 * fortunately don't get interpreted as two arguments).
95 #define RELOC(x) (*PTRRELOC(&(x)))
96 #define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
97 #define OF_WORKAROUNDS 0
100 #define ADDR(x) (u32) (x)
101 #define OF_WORKAROUNDS of_workarounds
105 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
106 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
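
/*
 * Illustrative sketch, not part of the original file: how RELOC() and
 * ADDR() are meant to be used, per the comment above. On ppc64 every
 * access to static data goes through RELOC() and every address handed
 * to firmware goes through ADDR(). The names example_flag and
 * example_reloc_usage() are hypothetical; the calls mirror patterns
 * used later in this file.
 */
#if 0
static int __initdata example_flag;

static void __init example_reloc_usage(void)
{
	phandle chosen;

	RELOC(example_flag) = 1;	/* not: example_flag = 1 */
	chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
	prom_printf("chosen node: %x\n", chosen);
}
#endif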
108 #define PROM_BUG() do { \
109 prom_printf("kernel BUG at %s line 0x%x!\n", \
110 RELOC(__FILE__), __LINE__); \
111 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
115 #define prom_debug(x...) prom_printf(x)
117 #define prom_debug(x...)
121 typedef u32 prom_arg_t;
139 struct mem_map_entry {
146 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
149 extern int enter_prom(struct prom_args *args, unsigned long entry);
151 static inline int enter_prom(struct prom_args *args, unsigned long entry)
153 return ((int (*)(struct prom_args *))entry)(args);
157 extern void copy_and_flush(unsigned long dest, unsigned long src,
158 unsigned long size, unsigned long offset);
161 static struct prom_t __initdata prom;
163 static unsigned long prom_entry __initdata;
165 #define PROM_SCRATCH_SIZE 256
167 static char __initdata of_stdout_device[256];
168 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
170 static unsigned long __initdata dt_header_start;
171 static unsigned long __initdata dt_struct_start, dt_struct_end;
172 static unsigned long __initdata dt_string_start, dt_string_end;
174 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
177 static int __initdata iommu_force_on;
178 static int __initdata ppc64_iommu_off;
179 static unsigned long __initdata prom_tce_alloc_start;
180 static unsigned long __initdata prom_tce_alloc_end;
183 static int __initdata of_platform;
185 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
187 static unsigned long __initdata prom_memory_limit;
189 static unsigned long __initdata alloc_top;
190 static unsigned long __initdata alloc_top_high;
191 static unsigned long __initdata alloc_bottom;
192 static unsigned long __initdata rmo_top;
193 static unsigned long __initdata ram_top;
195 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
196 static int __initdata mem_reserve_cnt;
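
/*
 * Illustrative sketch, not part of the original file: how a consumer
 * of mem_reserve_map can walk it, relying on the terminating entry
 * with size 0 described in the comment above MEM_RESERVE_MAP_SIZE.
 * The helper name walk_reserve_map() is hypothetical.
 */
#if 0
static void __init walk_reserve_map(struct mem_map_entry *map)
{
	int i;

	for (i = 0; map[i].size != 0; i++)
		prom_debug("reserved: 0x%x bytes at 0x%x\n",
			   map[i].size, map[i].base);
}
#endif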
198 static cell_t __initdata regbuf[1024];
201 #define MAX_CPU_THREADS 2
207 unsigned int threadid;
208 } hmt_thread_data[NR_CPUS];
209 #endif /* CONFIG_HMT */
212 * Error results ... some OF calls will return "-1" on error, some
213 * will return 0, some will return either. To simplify, here are
214 * macros to use with any ihandle or phandle return value to check if it is valid.
218 #define PROM_ERROR (-1u)
219 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
220 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
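
/*
 * Illustrative sketch, not part of the original file: typical use of
 * the validity macros above when probing for an optional node. The
 * function name example_have_rtas() is hypothetical; the same pattern
 * appears throughout this file.
 */
#if 0
static int __init example_have_rtas(void)
{
	phandle rtas;

	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (!PHANDLE_VALID(rtas))
		return 0;	/* node absent, or firmware returned an error */
	return 1;
}
#endif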
223 /* This is the one and *ONLY* place where we actually call open firmware. */
227 static int __init call_prom(const char *service, int nargs, int nret, ...)
230 struct prom_args args;
233 args.service = ADDR(service);
237 va_start(list, nret);
238 for (i = 0; i < nargs; i++)
239 args.args[i] = va_arg(list, prom_arg_t);
242 for (i = 0; i < nret; i++)
243 args.args[nargs+i] = 0;
245 if (enter_prom(&args, RELOC(prom_entry)) < 0)
248 return (nret > 0) ? args.args[nargs] : 0;
251 static int __init call_prom_ret(const char *service, int nargs, int nret,
252 prom_arg_t *rets, ...)
255 struct prom_args args;
258 args.service = ADDR(service);
262 va_start(list, rets);
263 for (i = 0; i < nargs; i++)
264 args.args[i] = va_arg(list, prom_arg_t);
267 for (i = 0; i < nret; i++)
270 if (enter_prom(&args, RELOC(prom_entry)) < 0)
274 for (i = 1; i < nret; ++i)
275 rets[i-1] = args.args[nargs+i];
277 return (nret > 0) ? args.args[nargs] : 0;
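
/*
 * Illustrative sketch, not part of the original file: how the two
 * wrappers above are used. call_prom() returns the first return value
 * of the service; call_prom_ret() additionally copies any further
 * return values into rets[]. The function and its parameters are
 * hypothetical, but both calls mirror real ones made later in this
 * file ("getprop" and "instantiate-rtas").
 */
#if 0
static void __init example_call_prom_usage(phandle node, ihandle rtas_inst,
					   u32 base)
{
	u32 val;
	prom_arg_t entry;

	/* One return value: the property length, or PROM_ERROR. */
	if (call_prom("getprop", 4, 1, node, ADDR("#size-cells"),
		      (u32)(unsigned long) &val, (u32) sizeof(val)) == PROM_ERROR)
		prom_printf("getprop failed\n");

	/* Two return values: the status is returned, the second value
	 * (the RTAS entry point) lands in entry. */
	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"), rtas_inst, base) != 0)
		prom_printf("instantiate-rtas failed\n");
}
#endif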
281 static void __init prom_print(const char *msg)
284 struct prom_t *_prom = &RELOC(prom);
286 if (_prom->stdout == 0)
289 for (p = msg; *p != 0; p = q) {
290 for (q = p; *q != 0 && *q != '\n'; ++q)
293 call_prom("write", 3, 1, _prom->stdout, p, q - p);
297 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
302 static void __init prom_print_hex(unsigned long val)
304 int i, nibbles = sizeof(val)*2;
305 char buf[sizeof(val)*2+1];
306 struct prom_t *_prom = &RELOC(prom);
308 for (i = nibbles-1; i >= 0; i--) {
309 buf[i] = (val & 0xf) + '0';
311 buf[i] += ('a'-'0'-10);
315 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
319 static void __init prom_printf(const char *format, ...)
321 const char *p, *q, *s;
324 struct prom_t *_prom = &RELOC(prom);
326 va_start(args, format);
328 format = PTRRELOC(format);
330 for (p = format; *p != 0; p = q) {
331 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
334 call_prom("write", 3, 1, _prom->stdout, p, q - p);
339 call_prom("write", 3, 1, _prom->stdout,
349 s = va_arg(args, const char *);
354 v = va_arg(args, unsigned long);
362 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
365 struct prom_t *_prom = &RELOC(prom);
367 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
369 * Old OF requires we claim physical and virtual separately
370 * and then map explicitly (assuming virtual mode)
375 ret = call_prom_ret("call-method", 5, 2, &result,
376 ADDR("claim"), _prom->memory,
378 if (ret != 0 || result == -1)
380 ret = call_prom_ret("call-method", 5, 2, &result,
381 ADDR("claim"), _prom->mmumap,
384 call_prom("call-method", 4, 1, ADDR("release"),
385 _prom->memory, size, virt);
388 /* the 0x12 is M (coherence) + PP == read/write */
389 call_prom("call-method", 6, 1,
390 ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
393 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
397 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
400 reason = PTRRELOC(reason);
403 /* ToDo: should put up an SRC here on p/iSeries */
404 call_prom("exit", 0, 0);
406 for (;;) /* should never get here */
411 static int __init prom_next_node(phandle *nodep)
415 if ((node = *nodep) != 0
416 && (*nodep = call_prom("child", 1, 1, node)) != 0)
418 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
421 if ((node = call_prom("parent", 1, 1, node)) == 0)
423 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
428 static int inline prom_getprop(phandle node, const char *pname,
429 void *value, size_t valuelen)
431 return call_prom("getprop", 4, 1, node, ADDR(pname),
432 (u32)(unsigned long) value, (u32) valuelen);
435 static int inline prom_getproplen(phandle node, const char *pname)
437 return call_prom("getproplen", 2, 1, node, ADDR(pname));
440 static void add_string(char **str, const char *q)
450 static char *tohex(unsigned int x)
452 static char digits[] = "0123456789abcdef";
453 static char result[9];
460 result[i] = digits[x & 0xf];
462 } while (x != 0 && i > 0);
466 static int __init prom_setprop(phandle node, const char *nodename,
467 const char *pname, void *value, size_t valuelen)
471 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
472 return call_prom("setprop", 4, 1, node, ADDR(pname),
473 (u32)(unsigned long) value, (u32) valuelen);
475 /* gah... setprop doesn't work on longtrail, have to use interpret */
477 add_string(&p, "dev");
478 add_string(&p, nodename);
479 add_string(&p, tohex((u32)(unsigned long) value));
480 add_string(&p, tohex(valuelen));
481 add_string(&p, tohex(ADDR(pname)));
482 add_string(&p, tohex(strlen(RELOC(pname))));
483 add_string(&p, "property");
485 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
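
/*
 * For illustration, not part of the original file: the Forth command
 * assembled above has the shape
 *
 *   dev <node-path> <value-addr> <value-len> <name-addr> <name-len> property
 *
 * with the numeric arguments rendered in hex by tohex(). A
 * (hypothetical) instance would be
 *
 *   dev /chosen 2f40000 4 2f40100 d property
 *
 * i.e. set a 4-byte property whose name string lives at 0x2f40100.
 */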
488 /* We can't use the standard versions because of RELOC headaches. */
489 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
490 || ('a' <= (c) && (c) <= 'f') \
491 || ('A' <= (c) && (c) <= 'F'))
493 #define isdigit(c) ('0' <= (c) && (c) <= '9')
494 #define islower(c) ('a' <= (c) && (c) <= 'z')
495 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
497 unsigned long prom_strtoul(const char *cp, const char **endp)
499 unsigned long result = 0, base = 10, value;
504 if (toupper(*cp) == 'X') {
510 while (isxdigit(*cp) &&
511 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
512 result = result * base + value;
522 unsigned long prom_memparse(const char *ptr, const char **retptr)
524 unsigned long ret = prom_strtoul(ptr, retptr);
528 * We can't use a switch here because GCC *may* generate a
529 * jump table which won't work, because we're not running at
530 * the address we're linked at.
532 if ('G' == **retptr || 'g' == **retptr)
535 if ('M' == **retptr || 'm' == **retptr)
538 if ('K' == **retptr || 'k' == **retptr)
550 * Early parsing of the command line passed to the kernel, used for
551 * "mem=x" and the options that affect the iommu
553 static void __init early_cmdline_parse(void)
555 struct prom_t *_prom = &RELOC(prom);
559 RELOC(prom_cmd_line[0]) = 0;
560 p = RELOC(prom_cmd_line);
561 if ((long)_prom->chosen > 0)
562 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
563 #ifdef CONFIG_CMDLINE
564 if (l == 0) /* dbl check */
565 strlcpy(RELOC(prom_cmd_line),
566 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
567 #endif /* CONFIG_CMDLINE */
568 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
571 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
573 prom_printf("iommu opt is: %s\n", opt);
575 while (*opt && *opt == ' ')
577 if (!strncmp(opt, RELOC("off"), 3))
578 RELOC(ppc64_iommu_off) = 1;
579 else if (!strncmp(opt, RELOC("force"), 5))
580 RELOC(iommu_force_on) = 1;
584 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
587 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
589 /* Align to 16 MB == size of ppc64 large page */
590 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
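
/*
 * Illustrative sketch, not part of the original file: the strings the
 * parsing above accepts. "iommu=off" and "iommu=force" set the two
 * flags, while the "mem=" value goes through prom_memparse(), so a
 * K, M or G suffix scales it. The function and values below are
 * hypothetical.
 */
#if 0
static void __init example_memparse(void)
{
	const char *end;
	unsigned long limit;

	limit = prom_memparse(RELOC("512M"), &end);	/* 512UL << 20 */
	limit = prom_memparse(RELOC("2G"), &end);	/* 2UL << 30 */
}
#endif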
595 #ifdef CONFIG_PPC_PSERIES
597 * To tell the firmware what our capabilities are, we have to pass
598 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
599 * that contain structures that contain the actual values.
601 static struct fake_elf {
608 char name[8]; /* "PowerPC" */
622 char name[24]; /* "IBM,RPA-Client-Config" */
636 .e_ident = { 0x7f, 'E', 'L', 'F',
637 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
638 .e_type = ET_EXEC, /* yeah right */
640 .e_version = EV_CURRENT,
641 .e_phoff = offsetof(struct fake_elf, phdr),
642 .e_phentsize = sizeof(Elf32_Phdr),
648 .p_offset = offsetof(struct fake_elf, chrpnote),
649 .p_filesz = sizeof(struct chrpnote)
652 .p_offset = offsetof(struct fake_elf, rpanote),
653 .p_filesz = sizeof(struct rpanote)
657 .namesz = sizeof("PowerPC"),
658 .descsz = sizeof(struct chrpdesc),
662 .real_mode = ~0U, /* ~0 means "don't care" */
671 .namesz = sizeof("IBM,RPA-Client-Config"),
672 .descsz = sizeof(struct rpadesc),
674 .name = "IBM,RPA-Client-Config",
677 .min_rmo_size = 64, /* in megabytes */
678 .min_rmo_percent = 0,
679 .max_pft_size = 48, /* 2^48 bytes max PFT size */
687 static void __init prom_send_capabilities(void)
691 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
692 if (elfloader == 0) {
693 prom_printf("couldn't open /packages/elf-loader\n");
696 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
697 elfloader, ADDR(&fake_elf));
698 call_prom("close", 1, 0, elfloader);
703 * Memory allocation strategy... our layout is normally:
705 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
706 * rare cases, initrd might end up being before the kernel though.
707 * We assume this won't override the final kernel at 0, we have no
708 * provision to handle that in this version, but it should hopefully never happen.
711 * alloc_top is set to the top of RMO, eventually shrink down if the
714 * alloc_bottom is set to the top of kernel/initrd
716 * from there, allocations are done this way : rtas is allocated
717 * topmost, and the device-tree is allocated from the bottom. We try
718 * to grow the device-tree allocation as we progress. If we can't,
719 * then we fail, we don't currently have a facility to restart
720 * elsewhere, but that shouldn't be necessary.
722 * Note that calls to reserve_mem have to be done explicitly, memory
723 * allocated with either alloc_up or alloc_down isn't automatically reserved.
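
/*
 * Illustrative sketch, not part of the original file: the call pattern
 * implied by the strategy above. RTAS-style allocations come down from
 * alloc_top, device-tree chunks come up from alloc_bottom, and anything
 * that must survive into the kernel is reserved explicitly. The
 * function and sizes are hypothetical.
 */
#if 0
static void __init example_alloc_pattern(void)
{
	unsigned long base;

	base = alloc_down(0x10000, PAGE_SIZE, 0);	/* e.g. for RTAS */
	if (base != 0)
		reserve_mem(base, 0x10000);

	base = alloc_up(DEVTREE_CHUNK_SIZE, PAGE_SIZE);	/* device-tree chunk */
	if (base == 0)
		prom_panic("out of memory");
}
#endif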
729 * Allocates memory in the RMO upward from the kernel/initrd
731 * When align is 0, this is a special case, it means to allocate in place
732 * at the current location of alloc_bottom or fail (that is basically
733 * extending the previous allocation). Used for the device-tree flattening
735 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
737 unsigned long base = RELOC(alloc_bottom);
738 unsigned long addr = 0;
741 base = _ALIGN_UP(base, align);
742 prom_debug("alloc_up(%x, %x)\n", size, align);
743 if (RELOC(ram_top) == 0)
744 prom_panic("alloc_up() called with mem not initialized\n");
747 base = _ALIGN_UP(RELOC(alloc_bottom), align);
749 base = RELOC(alloc_bottom);
751 for(; (base + size) <= RELOC(alloc_top);
752 base = _ALIGN_UP(base + 0x100000, align)) {
753 prom_debug(" trying: 0x%x\n\r", base);
754 addr = (unsigned long)prom_claim(base, size, 0);
755 if (addr != PROM_ERROR && addr != 0)
763 RELOC(alloc_bottom) = addr;
765 prom_debug(" -> %x\n", addr);
766 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
767 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
768 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
769 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
770 prom_debug(" ram_top : %x\n", RELOC(ram_top));
776 * Allocates memory downward, either from top of RMO, or if highmem
777 * is set, from the top of RAM. Note that this one doesn't handle
778 * failures. It does claim memory if highmem is not set.
780 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
783 unsigned long base, addr = 0;
785 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
786 highmem ? RELOC("(high)") : RELOC("(low)"));
787 if (RELOC(ram_top) == 0)
788 prom_panic("alloc_down() called with mem not initialized\n");
791 /* Carve out storage for the TCE table. */
792 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
793 if (addr <= RELOC(alloc_bottom))
795 /* Will we bump into the RMO ? If yes, check out that we
796 * didn't overlap existing allocations there, if we did,
797 * we are dead, we must be the first in town !
799 if (addr < RELOC(rmo_top)) {
800 /* Good, we are first */
801 if (RELOC(alloc_top) == RELOC(rmo_top))
802 RELOC(alloc_top) = RELOC(rmo_top) = addr;
806 RELOC(alloc_top_high) = addr;
810 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
811 for (; base > RELOC(alloc_bottom);
812 base = _ALIGN_DOWN(base - 0x100000, align)) {
813 prom_debug(" trying: 0x%x\n\r", base);
814 addr = (unsigned long)prom_claim(base, size, 0);
815 if (addr != PROM_ERROR && addr != 0)
821 RELOC(alloc_top) = addr;
824 prom_debug(" -> %x\n", addr);
825 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
826 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
827 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
828 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
829 prom_debug(" ram_top : %x\n", RELOC(ram_top));
837 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
842 /* Ignore more than 2 cells */
843 while (s > sizeof(unsigned long) / 4) {
859 * Very dumb function for adding to the memory reserve list, but
860 * we don't need anything smarter at this point
862 * XXX Eventually check for collisions. They should NEVER happen.
863 * If problems seem to show up, it would be a good start to track them down.
866 static void reserve_mem(unsigned long base, unsigned long size)
868 unsigned long top = base + size;
869 unsigned long cnt = RELOC(mem_reserve_cnt);
874 /* We need to always keep one empty entry so that we
875 * have our terminator with "size" set to 0 since we are
876 * dumb and just copy this entire array to the boot params
878 base = _ALIGN_DOWN(base, PAGE_SIZE);
879 top = _ALIGN_UP(top, PAGE_SIZE);
882 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
883 prom_panic("Memory reserve map exhausted !\n");
884 RELOC(mem_reserve_map)[cnt].base = base;
885 RELOC(mem_reserve_map)[cnt].size = size;
886 RELOC(mem_reserve_cnt) = cnt + 1;
890 * Initialize memory allocation mechanism, parse "memory" nodes and
891 * obtain that way the top of memory and RMO to set up our local allocator
893 static void __init prom_init_mem(void)
896 char *path, type[64];
899 struct prom_t *_prom = &RELOC(prom);
903 * We iterate the memory nodes to find
904 * 1) top of RMO (first node)
908 prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
910 prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
911 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
912 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
914 prom_debug("scanning memory:\n");
915 path = RELOC(prom_scratch);
917 for (node = 0; prom_next_node(&node); ) {
919 prom_getprop(node, "device_type", type, sizeof(type));
923 * CHRP Longtrail machines have no device_type
924 * on the memory node, so check the name instead...
926 prom_getprop(node, "name", type, sizeof(type));
928 if (strcmp(type, RELOC("memory")))
931 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
932 if (plen > sizeof(regbuf)) {
933 prom_printf("memory node too large for buffer !\n");
934 plen = sizeof(regbuf);
937 endp = p + (plen / sizeof(cell_t));
940 memset(path, 0, PROM_SCRATCH_SIZE);
941 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
942 prom_debug(" node %s :\n", path);
943 #endif /* DEBUG_PROM */
945 while ((endp - p) >= (rac + rsc)) {
946 unsigned long base, size;
948 base = prom_next_cell(rac, &p);
949 size = prom_next_cell(rsc, &p);
953 prom_debug(" %x %x\n", base, size);
955 RELOC(rmo_top) = size;
956 if ((base + size) > RELOC(ram_top))
957 RELOC(ram_top) = base + size;
961 RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
963 /* Check if we have an initrd after the kernel, if we do move our bottom point to after it */
966 if (RELOC(prom_initrd_start)) {
967 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
968 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
972 * If prom_memory_limit is set we reduce the upper limits *except* for
973 * alloc_top_high. This must be the real top of RAM so we can put TCE's up there.
977 RELOC(alloc_top_high) = RELOC(ram_top);
979 if (RELOC(prom_memory_limit)) {
980 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
981 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
982 RELOC(prom_memory_limit));
983 RELOC(prom_memory_limit) = 0;
984 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
985 prom_printf("Ignoring mem=%x >= ram_top.\n",
986 RELOC(prom_memory_limit));
987 RELOC(prom_memory_limit) = 0;
989 RELOC(ram_top) = RELOC(prom_memory_limit);
990 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
995 * Setup our top alloc point, that is top of RMO or top of
996 * segment 0 when running non-LPAR.
997 * Some RS64 machines have buggy firmware where claims up at
998 * 1GB fail. Cap at 768MB as a workaround.
999 * Since 768MB is plenty of room, and we need to cap to something
1000 * reasonable on 32-bit, cap at 768MB on all machines.
1002 if (!RELOC(rmo_top))
1003 RELOC(rmo_top) = RELOC(ram_top);
1004 RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
1005 RELOC(alloc_top) = RELOC(rmo_top);
1007 prom_printf("memory layout at init:\n");
1008 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
1009 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
1010 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
1011 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
1012 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
1013 prom_printf(" ram_top : %x\n", RELOC(ram_top));
1018 * Allocate room for and instantiate RTAS
1020 static void __init prom_instantiate_rtas(void)
1024 u32 base, entry = 0;
1027 prom_debug("prom_instantiate_rtas: start...\n");
1029 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1030 prom_debug("rtas_node: %x\n", rtas_node);
1031 if (!PHANDLE_VALID(rtas_node))
1034 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
1038 base = alloc_down(size, PAGE_SIZE, 0);
1040 prom_printf("RTAS allocation failed !\n");
1044 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1045 if (!IHANDLE_VALID(rtas_inst)) {
1046 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1050 prom_printf("instantiating rtas at 0x%x ...", base);
1052 if (call_prom_ret("call-method", 3, 2, &entry,
1053 ADDR("instantiate-rtas"),
1054 rtas_inst, base) != 0
1056 prom_printf(" failed\n");
1059 prom_printf(" done\n");
1061 reserve_mem(base, size);
1063 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1064 &base, sizeof(base));
1065 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1066 &entry, sizeof(entry));
1068 prom_debug("rtas base = 0x%x\n", base);
1069 prom_debug("rtas entry = 0x%x\n", entry);
1070 prom_debug("rtas size = 0x%x\n", (long)size);
1072 prom_debug("prom_instantiate_rtas: end...\n");
1077 * Allocate room for and initialize TCE tables
1079 static void __init prom_initialize_tce_table(void)
1083 char compatible[64], type[64], model[64];
1084 char *path = RELOC(prom_scratch);
1086 u32 minalign, minsize;
1087 u64 tce_entry, *tce_entryp;
1088 u64 local_alloc_top, local_alloc_bottom;
1091 if (RELOC(ppc64_iommu_off))
1094 prom_debug("starting prom_initialize_tce_table\n");
1096 /* Cache current top of allocs so we reserve a single block */
1097 local_alloc_top = RELOC(alloc_top_high);
1098 local_alloc_bottom = local_alloc_top;
1100 /* Search all nodes looking for PHBs. */
1101 for (node = 0; prom_next_node(&node); ) {
1105 prom_getprop(node, "compatible",
1106 compatible, sizeof(compatible));
1107 prom_getprop(node, "device_type", type, sizeof(type));
1108 prom_getprop(node, "model", model, sizeof(model));
1110 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1113 /* Keep the old logic intact to avoid regression. */
1114 if (compatible[0] != 0) {
1115 if ((strstr(compatible, RELOC("python")) == NULL) &&
1116 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1117 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1119 } else if (model[0] != 0) {
1120 if ((strstr(model, RELOC("ython")) == NULL) &&
1121 (strstr(model, RELOC("peedwagon")) == NULL) &&
1122 (strstr(model, RELOC("innipeg")) == NULL))
1126 if (prom_getprop(node, "tce-table-minalign", &minalign,
1127 sizeof(minalign)) == PROM_ERROR)
1129 if (prom_getprop(node, "tce-table-minsize", &minsize,
1130 sizeof(minsize)) == PROM_ERROR)
1131 minsize = 4UL << 20;
1134 * Even though we read what OF wants, we just set the table
1135 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1136 * By doing this, we avoid the pitfalls of trying to DMA to
1137 * MMIO space and the DMA alias hole.
1139 * On POWER4, firmware sets the TCE region by assuming
1140 * each TCE table is 8MB. Using this memory for anything
1141 * else will impact performance, so we always allocate 8MB.
1144 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1145 minsize = 8UL << 20;
1147 minsize = 4UL << 20;
1149 /* Align to the greater of the align or size */
1150 align = max(minalign, minsize);
1151 base = alloc_down(minsize, align, 1);
1153 prom_panic("ERROR, cannot find space for TCE table.\n");
1154 if (base < local_alloc_bottom)
1155 local_alloc_bottom = base;
1157 /* It seems OF doesn't null-terminate the path :-( */
1158 memset(path, 0, PROM_SCRATCH_SIZE); /* not sizeof(path): path is a pointer into prom_scratch */
1159 /* Call OF to setup the TCE hardware */
1160 if (call_prom("package-to-path", 3, 1, node,
1161 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1162 prom_printf("package-to-path failed\n");
1165 /* Save away the TCE table attributes for later use. */
1166 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1167 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1169 prom_debug("TCE table: %s\n", path);
1170 prom_debug("\tnode = 0x%x\n", node);
1171 prom_debug("\tbase = 0x%x\n", base);
1172 prom_debug("\tsize = 0x%x\n", minsize);
1174 /* Initialize the table to have a one-to-one mapping
1175 * over the allocated size.
1177 tce_entryp = (unsigned long *)base;
1178 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1179 tce_entry = (i << PAGE_SHIFT);
1181 *tce_entryp = tce_entry;
1184 prom_printf("opening PHB %s", path);
1185 phb_node = call_prom("open", 1, 1, path);
1187 prom_printf("... failed\n");
1189 prom_printf("... done\n");
1191 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1192 phb_node, -1, minsize,
1193 (u32) base, (u32) (base >> 32));
1194 call_prom("close", 1, 0, phb_node);
1197 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1199 if (RELOC(prom_memory_limit)) {
1201 * We align the start to a 16MB boundary so we can map
1202 * the TCE area using large pages if possible.
1203 * The end should be the top of RAM so no need to align it.
1205 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1207 RELOC(prom_tce_alloc_end) = local_alloc_top;
1210 /* Flag the first invalid entry */
1211 prom_debug("ending prom_initialize_tce_table\n");
1216 * With CHRP SMP we need to use the OF to start the other processors.
1217 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1218 * so we have to put the processors into a holding pattern controlled
1219 * by the kernel (not OF) before we destroy the OF.
1221 * This uses a chunk of low memory, puts some holding pattern
1222 * code there and sends the other processors off to there until
1223 * smp_boot_cpus tells them to do something. The holding pattern
1224 * checks that address until its cpu # is there, when it is that
1225 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1226 * of setting those values.
1228 * We also use physical address 0x4 here to tell when a cpu
1229 * is in its holding pattern code.
1233 extern void __secondary_hold(void);
1234 extern unsigned long __secondary_hold_spinloop;
1235 extern unsigned long __secondary_hold_acknowledge;
1238 * We want to reference the copy of __secondary_hold_* in the
1239 * 0 - 0x100 address range
1241 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
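
/*
 * Illustrative sketch, not part of the original file: a C-level
 * approximation of what the (assembly) holding-pattern code referenced
 * above does, as described in the comment block: each secondary
 * announces itself by writing its id into the acknowledge word, which
 * prom_hold_cpus() below polls, and then sits in a tight loop until
 * the kernel later releases it. Names and the exact release condition
 * here are simplified.
 */
#if 0
static void example_holding_pattern(unsigned long my_hw_id,
				    volatile unsigned long *acknowledge,
				    volatile unsigned long *release)
{
	*acknowledge = my_hw_id;	/* "I'm here": seen by prom_hold_cpus() */
	while (*release == 0)
		;			/* spin until told to proceed */
}
#endif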
1243 static void __init prom_hold_cpus(void)
1250 unsigned int interrupt_server[MAX_CPU_THREADS];
1251 unsigned int cpu_threads, hw_cpu_num;
1253 struct prom_t *_prom = &RELOC(prom);
1254 unsigned long *spinloop
1255 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1256 unsigned long *acknowledge
1257 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1259 /* __secondary_hold is actually a descriptor, not the text address */
1260 unsigned long secondary_hold
1261 = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
1263 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1266 prom_debug("prom_hold_cpus: start...\n");
1267 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1268 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1269 prom_debug(" 1) acknowledge = 0x%x\n",
1270 (unsigned long)acknowledge);
1271 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1272 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1274 /* Set the common spinloop variable, so all of the secondary cpus
1275 * will block when they are awakened from their OF spinloop.
1276 * This must occur for both SMP and non SMP kernels, since OF will
1277 * be trashed when we move the kernel.
1282 for (i = 0; i < NR_CPUS; i++)
1283 RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
1286 for (node = 0; prom_next_node(&node); ) {
1288 prom_getprop(node, "device_type", type, sizeof(type));
1289 if (strcmp(type, RELOC("cpu")) != 0)
1292 /* Skip non-configured cpus. */
1293 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1294 if (strcmp(type, RELOC("okay")) != 0)
1298 prom_getprop(node, "reg", &reg, sizeof(reg));
1300 prom_debug("\ncpuid = 0x%x\n", cpuid);
1301 prom_debug("cpu hw idx = 0x%x\n", reg);
1303 /* Init the acknowledge var which will be reset by
1304 * the secondary cpu when it awakens from its OF spinloop.
1307 *acknowledge = (unsigned long)-1;
1309 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
1311 sizeof(interrupt_server));
1313 /* no property. old hardware has no SMT */
1315 interrupt_server[0] = reg; /* fake it with phys id */
1317 /* We have a threaded processor */
1318 cpu_threads = propsize / sizeof(u32);
1319 if (cpu_threads > MAX_CPU_THREADS) {
1320 prom_printf("SMT: too many threads!\n"
1321 "SMT: found %x, max is %x\n",
1322 cpu_threads, MAX_CPU_THREADS);
1323 cpu_threads = 1; /* ToDo: panic? */
1327 hw_cpu_num = interrupt_server[0];
1328 if (hw_cpu_num != _prom->cpu) {
1329 /* Primary Thread of non-boot cpu */
1330 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
1331 call_prom("start-cpu", 3, 0, node,
1332 secondary_hold, reg);
1334 for (i = 0; (i < 100000000) &&
1335 (*acknowledge == ((unsigned long)-1)); i++ )
1338 if (*acknowledge == reg)
1339 prom_printf("done\n");
1341 prom_printf("failed: %x\n", *acknowledge);
1345 prom_printf("%x : boot cpu %x\n", cpuid, reg);
1346 #endif /* CONFIG_SMP */
1348 /* Reserve cpu #s for secondary threads. They start later. */
1349 cpuid += cpu_threads;
1352 /* Only enable HMT on processors that provide support. */
1353 if (__is_processor(PV_PULSAR) ||
1354 __is_processor(PV_ICESTAR) ||
1355 __is_processor(PV_SSTAR)) {
1356 prom_printf(" starting secondary threads\n");
1358 for (i = 0; i < NR_CPUS; i += 2) {
1363 unsigned long pir = mfspr(SPRN_PIR);
1364 if (__is_processor(PV_PULSAR)) {
1365 RELOC(hmt_thread_data)[i].pir =
1368 RELOC(hmt_thread_data)[i].pir =
1374 prom_printf("Processor is not HMT capable\n");
1378 if (cpuid > NR_CPUS)
1379 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1380 ") exceeded: ignoring extras\n");
1382 prom_debug("prom_hold_cpus: end...\n");
1386 static void __init prom_init_client_services(unsigned long pp)
1388 struct prom_t *_prom = &RELOC(prom);
1390 /* Get a handle to the prom entry point before anything else */
1391 RELOC(prom_entry) = pp;
1393 /* get a handle for the stdout device */
1394 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1395 if (!PHANDLE_VALID(_prom->chosen))
1396 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1398 /* get device tree root */
1399 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1400 if (!PHANDLE_VALID(_prom->root))
1401 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1408 * For really old powermacs, we need to map things we claim.
1409 * For that, we need the ihandle of the mmu.
1410 * Also, on the longtrail, we need to work around other bugs.
1412 static void __init prom_find_mmu(void)
1414 struct prom_t *_prom = &RELOC(prom);
1418 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1419 if (!PHANDLE_VALID(oprom))
1421 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1423 version[sizeof(version) - 1] = 0;
1424 /* XXX might need to add other versions here */
1425 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1426 of_workarounds = OF_WA_CLAIM;
1427 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1428 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1429 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1432 _prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
1433 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1434 sizeof(_prom->mmumap));
1435 if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
1436 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1439 #define prom_find_mmu()
1442 static void __init prom_init_stdout(void)
1444 struct prom_t *_prom = &RELOC(prom);
1445 char *path = RELOC(of_stdout_device);
1449 if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
1450 prom_panic("cannot find stdout");
1452 _prom->stdout = val;
1454 /* Get the full OF pathname of the stdout device */
1455 memset(path, 0, 256);
1456 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1457 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1458 prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
1460 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1461 prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
1462 path, strlen(path) + 1);
1464 /* If it's a display, note it */
1465 memset(type, 0, sizeof(type));
1466 prom_getprop(val, "device_type", type, sizeof(type));
1467 if (strcmp(type, RELOC("display")) == 0)
1468 prom_setprop(val, path, "linux,boot-display", NULL, 0);
1471 static void __init prom_close_stdin(void)
1473 struct prom_t *_prom = &RELOC(prom);
1476 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1477 call_prom("close", 1, 0, val);
1480 static int __init prom_find_machine_type(void)
1482 struct prom_t *_prom = &RELOC(prom);
1488 len = prom_getprop(_prom->root, "compatible",
1489 compat, sizeof(compat)-1);
1493 char *p = &compat[i];
1497 if (strstr(p, RELOC("Power Macintosh")) ||
1498 strstr(p, RELOC("MacRISC")))
1499 return PLATFORM_POWERMAC;
1501 if (strstr(p, RELOC("Momentum,Maple")))
1502 return PLATFORM_MAPLE;
1508 /* Default to pSeries. We need to know if we are running LPAR */
1509 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1510 if (PHANDLE_VALID(rtas)) {
1511 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1512 if (x != PROM_ERROR) {
1513 prom_printf("Hypertas detected, assuming LPAR !\n");
1514 return PLATFORM_PSERIES_LPAR;
1517 return PLATFORM_PSERIES;
1519 return PLATFORM_CHRP;
1523 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1525 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1529 * If we have a display that we don't know how to drive,
1530 * we will want to try to execute OF's open method for it
1531 * later. However, OF will probably fall over if we do that
1532 * after we've taken over the MMU.
1533 * So we check whether we will need to open the display,
1534 * and if so, open it now.
1536 static void __init prom_check_displays(void)
1538 char type[16], *path;
1543 static unsigned char default_colors[] = {
1561 const unsigned char *clut;
1563 prom_printf("Looking for displays\n");
1564 for (node = 0; prom_next_node(&node); ) {
1565 memset(type, 0, sizeof(type));
1566 prom_getprop(node, "device_type", type, sizeof(type));
1567 if (strcmp(type, RELOC("display")) != 0)
1570 /* It seems OF doesn't null-terminate the path :-( */
1571 path = RELOC(prom_scratch);
1572 memset(path, 0, PROM_SCRATCH_SIZE);
1575 * leave some room at the end of the path for appending extra arguments
1578 if (call_prom("package-to-path", 3, 1, node, path,
1579 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1581 prom_printf("found display : %s, opening ... ", path);
1583 ih = call_prom("open", 1, 1, path);
1585 prom_printf("failed\n");
1590 prom_printf("done\n");
1591 prom_setprop(node, path, "linux,opened", NULL, 0);
1593 /* Setup a usable color table when the appropriate
1594 * method is available. Should update this to set-colors */
1595 clut = RELOC(default_colors);
1596 for (i = 0; i < 32; i++, clut += 3)
1597 if (prom_set_color(ih, i, clut[0], clut[1],
1601 #ifdef CONFIG_LOGO_LINUX_CLUT224
1602 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1603 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1604 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1607 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
1612 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1613 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1614 unsigned long needed, unsigned long align)
1618 *mem_start = _ALIGN(*mem_start, align);
1619 while ((*mem_start + needed) > *mem_end) {
1620 unsigned long room, chunk;
1622 prom_debug("Chunk exhausted, claiming more at %x...\n",
1623 RELOC(alloc_bottom));
1624 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1625 if (room > DEVTREE_CHUNK_SIZE)
1626 room = DEVTREE_CHUNK_SIZE;
1627 if (room < PAGE_SIZE)
1628 prom_panic("No memory for flatten_device_tree (no room)");
1629 chunk = alloc_up(room, 0);
1631 prom_panic("No memory for flatten_device_tree (claim failed)");
1632 *mem_end = RELOC(alloc_top);
1635 ret = (void *)*mem_start;
1636 *mem_start += needed;
1641 #define dt_push_token(token, mem_start, mem_end) \
1642 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
1644 static unsigned long __init dt_find_string(char *str)
1648 s = os = (char *)RELOC(dt_string_start);
1650 while (s < (char *)RELOC(dt_string_end)) {
1651 if (strcmp(s, str) == 0)
1659 * The Open Firmware 1275 specification states property names must be 31 bytes or
1660 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
1662 #define MAX_PROPERTY_NAME 64
1664 static void __init scan_dt_build_strings(phandle node,
1665 unsigned long *mem_start,
1666 unsigned long *mem_end)
1668 char *prev_name, *namep, *sstart;
1672 sstart = (char *)RELOC(dt_string_start);
1674 /* get and store all property names */
1675 prev_name = RELOC("");
1677 /* 64 is max len of name including nul. */
1678 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1679 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1680 /* No more properties: unwind alloc */
1681 *mem_start = (unsigned long)namep;
1686 if (strcmp(namep, RELOC("name")) == 0) {
1687 *mem_start = (unsigned long)namep;
1688 prev_name = RELOC("name");
1691 /* get/create string entry */
1692 soff = dt_find_string(namep);
1694 *mem_start = (unsigned long)namep;
1695 namep = sstart + soff;
1697 /* Trim off some if we can */
1698 *mem_start = (unsigned long)namep + strlen(namep) + 1;
1699 RELOC(dt_string_end) = *mem_start;
1704 /* do all our children */
1705 child = call_prom("child", 1, 1, node);
1706 while (child != 0) {
1707 scan_dt_build_strings(child, mem_start, mem_end);
1708 child = call_prom("peer", 1, 1, child);
1712 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1713 unsigned long *mem_end)
1716 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1718 unsigned char *valp;
1719 static char pname[MAX_PROPERTY_NAME];
1722 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1724 /* get the node's full name */
1725 namep = (char *)*mem_start;
1726 room = *mem_end - *mem_start;
1729 l = call_prom("package-to-path", 3, 1, node, namep, room);
1731 /* Didn't fit? Get more room. */
1733 if (l >= *mem_end - *mem_start)
1734 namep = make_room(mem_start, mem_end, l+1, 1);
1735 call_prom("package-to-path", 3, 1, node, namep, l);
1739 /* Fixup an Apple bug where they have bogus \0 chars in the
1740 * middle of the path in some properties, and extract
1741 * the unit name (everything after the last '/').
1743 for (lp = p = namep, ep = namep + l; p < ep; p++) {
1750 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
1753 /* get it again for debugging */
1754 path = RELOC(prom_scratch);
1755 memset(path, 0, PROM_SCRATCH_SIZE);
1756 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1758 /* get and store all properties */
1759 prev_name = RELOC("");
1760 sstart = (char *)RELOC(dt_string_start);
1762 if (call_prom("nextprop", 3, 1, node, prev_name,
1767 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1768 prev_name = RELOC("name");
1772 /* find string offset */
1773 soff = dt_find_string(RELOC(pname));
1775 prom_printf("WARNING: Can't find string index for"
1776 " <%s>, node %s\n", RELOC(pname), path);
1779 prev_name = sstart + soff;
1782 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1785 if (l == PROM_ERROR)
1787 if (l > MAX_PROPERTY_LENGTH) {
1788 prom_printf("WARNING: ignoring large property ");
1789 /* It seems OF doesn't null-terminate the path :-( */
1790 prom_printf("[%s] ", path);
1791 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1795 /* push property head */
1796 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1797 dt_push_token(l, mem_start, mem_end);
1798 dt_push_token(soff, mem_start, mem_end);
1800 /* push property content */
1801 valp = make_room(mem_start, mem_end, l, 4);
1802 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1803 *mem_start = _ALIGN(*mem_start, 4);
1806 /* Add a "linux,phandle" property. */
1807 soff = dt_find_string(RELOC("linux,phandle"));
1809 prom_printf("WARNING: Can't find string index for"
1810 " <linux-phandle> node %s\n", path);
1812 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1813 dt_push_token(4, mem_start, mem_end);
1814 dt_push_token(soff, mem_start, mem_end);
1815 valp = make_room(mem_start, mem_end, 4, 4);
1816 *(u32 *)valp = node;
1819 /* do all our children */
1820 child = call_prom("child", 1, 1, node);
1821 while (child != 0) {
1822 scan_dt_build_struct(child, mem_start, mem_end);
1823 child = call_prom("peer", 1, 1, child);
1826 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
1829 static void __init flatten_device_tree(void)
1832 unsigned long mem_start, mem_end, room;
1833 struct boot_param_header *hdr;
1834 struct prom_t *_prom = &RELOC(prom);
1839 * Check how much room we have between alloc top & bottom (+/- a
1840 * few pages), crop to 4Mb, as this is our "chunk" size
1842 room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
1843 if (room > DEVTREE_CHUNK_SIZE)
1844 room = DEVTREE_CHUNK_SIZE;
1845 prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
1847 /* Now try to claim that */
1848 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
1850 prom_panic("Can't allocate initial device-tree chunk\n");
1851 mem_end = RELOC(alloc_top);
1853 /* Get root of tree */
1854 root = call_prom("peer", 1, 1, (phandle)0);
1855 if (root == (phandle)0)
1856 prom_panic ("couldn't get device tree root\n");
1858 /* Build header and make room for mem rsv map */
1859 mem_start = _ALIGN(mem_start, 4);
1860 hdr = make_room(&mem_start, &mem_end,
1861 sizeof(struct boot_param_header), 4);
1862 RELOC(dt_header_start) = (unsigned long)hdr;
1863 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1865 /* Start of strings */
1866 mem_start = PAGE_ALIGN(mem_start);
1867 RELOC(dt_string_start) = mem_start;
1868 mem_start += 4; /* hole */
1870 /* Add "linux,phandle" in there, we'll need it */
1871 namep = make_room(&mem_start, &mem_end, 16, 1);
1872 strcpy(namep, RELOC("linux,phandle"));
1873 mem_start = (unsigned long)namep + strlen(namep) + 1;
1875 /* Build string array */
1876 prom_printf("Building dt strings...\n");
1877 scan_dt_build_strings(root, &mem_start, &mem_end);
1878 RELOC(dt_string_end) = mem_start;
1880 /* Build structure */
1881 mem_start = PAGE_ALIGN(mem_start);
1882 RELOC(dt_struct_start) = mem_start;
1883 prom_printf("Building dt structure...\n");
1884 scan_dt_build_struct(root, &mem_start, &mem_end);
1885 dt_push_token(OF_DT_END, &mem_start, &mem_end);
1886 RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
1889 hdr->boot_cpuid_phys = _prom->cpu;
1890 hdr->magic = OF_DT_HEADER;
1891 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1892 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1893 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1894 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1895 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1896 hdr->version = OF_DT_VERSION;
1897 /* Version 16 is not backward compatible */
1898 hdr->last_comp_version = 0x10;
1900 /* Reserve the whole thing and copy the reserve map in, we
1901 * also bump mem_reserve_cnt to cause further reservations to
1902 * fail since it's too late.
1904 reserve_mem(RELOC(dt_header_start), hdr->totalsize);
1905 memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
1910 prom_printf("reserved memory map:\n");
1911 for (i = 0; i < RELOC(mem_reserve_cnt); i++)
1912 prom_printf(" %x - %x\n",
1913 RELOC(mem_reserve_map)[i].base,
1914 RELOC(mem_reserve_map)[i].size);
1917 RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
1919 prom_printf("Device tree strings 0x%x -> 0x%x\n",
1920 RELOC(dt_string_start), RELOC(dt_string_end));
1921 prom_printf("Device tree struct 0x%x -> 0x%x\n",
1922 RELOC(dt_struct_start), RELOC(dt_struct_end));
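
/*
 * For illustration, not part of the original file: the flattened blob
 * produced above is laid out roughly as
 *
 *   dt_header_start: struct boot_param_header
 *                    memory reserve map (copy of mem_reserve_map)
 *   dt_string_start: string table built by scan_dt_build_strings()
 *   dt_struct_start: OF_DT_BEGIN_NODE / OF_DT_PROP / ... stream built
 *                    by scan_dt_build_struct(), terminated by OF_DT_END
 *
 * with every offset in the header expressed relative to dt_header_start,
 * as computed just above.
 */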
1927 static void __init fixup_device_tree(void)
1929 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
1930 phandle u3, i2c, mpic;
1935 /* Some G5s have a missing interrupt definition, fix it up here */
1936 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
1937 if (!PHANDLE_VALID(u3))
1939 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
1940 if (!PHANDLE_VALID(i2c))
1942 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
1943 if (!PHANDLE_VALID(mpic))
1946 /* check if proper rev of u3 */
1947 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
1950 if (u3_rev < 0x35 || u3_rev > 0x39)
1952 /* does it need fixup ? */
1953 if (prom_getproplen(i2c, "interrupts") > 0)
1956 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1958 /* interrupt on this revision of u3 is number 0 and level */
1961 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
1962 &interrupts, sizeof(interrupts));
1964 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
1965 &parent, sizeof(parent));
1970 static void __init prom_find_boot_cpu(void)
1972 struct prom_t *_prom = &RELOC(prom);
1978 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1981 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1983 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1984 _prom->cpu = getprop_rval;
1986 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
1989 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1991 #ifdef CONFIG_BLK_DEV_INITRD
1992 struct prom_t *_prom = &RELOC(prom);
1994 if (r3 && r4 && r4 != 0xdeadbeef) {
1997 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1998 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
2000 val = RELOC(prom_initrd_start);
2001 prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
2003 val = RELOC(prom_initrd_end);
2004 prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
2007 reserve_mem(RELOC(prom_initrd_start),
2008 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
2010 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
2011 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
2013 #endif /* CONFIG_BLK_DEV_INITRD */
2017 * We enter here early on, when the Open Firmware prom is still
2018 * handling exceptions and the MMU hash table for us.
2021 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2023 unsigned long r6, unsigned long r7)
2025 struct prom_t *_prom;
2028 unsigned long offset = reloc_offset();
2034 _prom = &RELOC(prom);
2037 * First zero the BSS
2039 memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
2042 * Init interface to Open Firmware, get some node references,
2045 prom_init_client_services(pp);
2048 * See if this OF is old enough that we need to do explicit maps
2049 * and other workarounds
2054 * Init prom stdout device
2059 * Check for an initrd
2061 prom_check_initrd(r3, r4);
2064 * Get default machine type. At this point, we do not differentiate
2065 * between pSeries SMP and pSeries LPAR
2067 RELOC(of_platform) = prom_find_machine_type();
2068 getprop_rval = RELOC(of_platform);
2069 prom_setprop(_prom->chosen, "/chosen", "linux,platform",
2070 &getprop_rval, sizeof(getprop_rval));
2072 #ifdef CONFIG_PPC_PSERIES
2074 * On pSeries, inform the firmware about our capabilities
2076 if (RELOC(of_platform) == PLATFORM_PSERIES ||
2077 RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
2078 prom_send_capabilities();
2082 * Copy the CPU hold code
2084 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2085 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2088 * Do early parsing of command line
2090 early_cmdline_parse();
2093 * Initialize memory management within prom_init
2098 * Determine which cpu is actually running right _now_
2100 prom_find_boot_cpu();
2103 * Initialize display devices
2105 prom_check_displays();
2109 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2110 * that uses the allocator, we need to make sure we get the top of memory
2111 * available for us here...
2113 if (RELOC(of_platform) == PLATFORM_PSERIES)
2114 prom_initialize_tce_table();
2118 * On non-powermacs, try to instantiate RTAS and put all CPUs
2119 * in spin-loops. PowerMacs don't have a working RTAS and use
2120 * a different way to spin CPUs
2122 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
2123 prom_instantiate_rtas();
2128 * Fill in some infos for use by the kernel later on
2130 if (RELOC(prom_memory_limit))
2131 prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
2132 &RELOC(prom_memory_limit),
2133 sizeof(prom_memory_limit));
2135 if (RELOC(ppc64_iommu_off))
2136 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
2139 if (RELOC(iommu_force_on))
2140 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
2143 if (RELOC(prom_tce_alloc_start)) {
2144 prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
2145 &RELOC(prom_tce_alloc_start),
2146 sizeof(prom_tce_alloc_start));
2147 prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
2148 &RELOC(prom_tce_alloc_end),
2149 sizeof(prom_tce_alloc_end));
2154 * Fixup any known bugs in the device-tree
2156 fixup_device_tree();
2159 * Now finally create the flattened device-tree
2161 prom_printf("copying OF device tree ...\n");
2162 flatten_device_tree();
2165 * in case stdin is USB and still active on IBM machines...
2166 * Unfortunately quiesce crashes on some powermacs if we have
2167 * closed stdin already (in particular the powerbook 101).
2169 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2173 * Call OF "quiesce" method to shut down pending DMA's from devices etc...
2176 prom_printf("Calling quiesce ...\n");
2177 call_prom("quiesce", 0, 0);
2180 * And finally, call the kernel passing it the flattened device
2181 * tree and NULL as r5, thus triggering the new entry point which
2182 * is common to us and kexec
2184 hdr = RELOC(dt_header_start);
2185 prom_printf("returning from prom_init\n");
2186 prom_debug("->dt_header_start=0x%x\n", hdr);
2189 reloc_got2(-offset);
2192 __start(hdr, KERNELBASE + offset, 0);