 * Procedures for interfacing to Open Firmware.
 *
 * Paul Mackerras August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>

#ifdef CONFIG_LOGO_LINUX_CLUT224
#include <linux/linux_logo.h>
extern const struct linux_logo logo_linux_clut224;
 * Properties whose value is longer than this get excluded from our
 * copy of the device tree. This value does need to be big enough to
 * ensure that we don't lose things like the interrupt-map property
 * on a PCI-PCI bridge.
#define MAX_PROPERTY_LENGTH	(1UL * 1024 * 1024)

 * Eventually bump that one up
#define DEVTREE_CHUNK_SIZE	0x100000

 * This is the size of the local memory reserve map that gets copied
 * into the boot params passed to the kernel. That size is totally
 * flexible as the kernel just reads the list until it encounters an
 * entry with size 0, so it can be changed without breaking binary
 * compatibility.
#define MEM_RESERVE_MAP_SIZE	8
 * prom_init() is called very early on, before the kernel text
 * and data have been mapped to KERNELBASE. At this point the code
 * is running at whatever address it has been loaded at.
 * On ppc32 we compile with -mrelocatable, which means that references
 * to extern and static variables get relocated automatically.
 * On ppc64 we have to relocate the references explicitly with
 * RELOC. (Note that strings count as static variables.)
 *
 * Because OF may have mapped I/O devices into the area starting at
 * KERNELBASE, particularly on CHRP machines, we can't safely call
 * OF once the kernel has been mapped to KERNELBASE. Therefore all
 * OF calls must be done within prom_init().
 *
 * ADDR is used in calls to call_prom. The 4th and following
 * arguments to call_prom should be 32-bit values.
 * On ppc64, 64 bit values are truncated to 32 bits (and
 * fortunately don't get interpreted as two arguments).

#ifdef CONFIG_PPC64
#define RELOC(x)		(*PTRRELOC(&(x)))
#define ADDR(x)			(u32) add_reloc_offset((unsigned long)(x))
#define OF_WORKAROUNDS		0
#else
#define ADDR(x)			(u32) (x)
#define OF_WORKAROUNDS		of_workarounds
#endif

#define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */
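/*
 * Editor-added illustration (not part of the original code): string literals
 * handed to OF must be wrapped in ADDR(), and globals/statics must be accessed
 * through RELOC(), exactly as done throughout this file, e.g.
 *
 *	_prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
 *	if (RELOC(prom_memory_limit)) ...
 */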
#define PROM_BUG() do {						\
	prom_printf("kernel BUG at %s line 0x%x!\n",		\
		    RELOC(__FILE__), __LINE__);			\
	__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);	\
} while (0)

#ifdef DEBUG_PROM
#define prom_debug(x...)	prom_printf(x)
#else
#define prom_debug(x...)
#endif

typedef u32 prom_arg_t;
struct mem_map_entry {
	u64	base;
	u64	size;
};

extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);

#ifdef CONFIG_PPC64
extern int enter_prom(struct prom_args *args, unsigned long entry);
#else
static inline int enter_prom(struct prom_args *args, unsigned long entry)
{
	return ((int (*)(struct prom_args *))entry)(args);
}
#endif

extern void copy_and_flush(unsigned long dest, unsigned long src,
			   unsigned long size, unsigned long offset);
static struct prom_t __initdata prom;

static unsigned long prom_entry __initdata;

#define PROM_SCRATCH_SIZE 256

static char __initdata of_stdout_device[256];
static char __initdata prom_scratch[PROM_SCRATCH_SIZE];

static unsigned long __initdata dt_header_start;
static unsigned long __initdata dt_struct_start, dt_struct_end;
static unsigned long __initdata dt_string_start, dt_string_end;

static unsigned long __initdata prom_initrd_start, prom_initrd_end;

static int __initdata iommu_force_on;
static int __initdata ppc64_iommu_off;
static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;

static int __initdata of_platform;

static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];

static unsigned long __initdata prom_memory_limit;

static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;

static unsigned long __initdata prom_crashk_base;
static unsigned long __initdata prom_crashk_size;

static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;

static cell_t __initdata regbuf[1024];
#define MAX_CPU_THREADS 2

 * Error results ... some OF calls will return "-1" on error, some
 * will return 0, some will return either. To simplify, here are
 * macros to use with any ihandle or phandle return value to check if
 * it is valid.
#define PROM_ERROR		(-1u)
#define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)
/* This is the one and *ONLY* place where we actually call open
 * firmware.
 */
static int __init call_prom(const char *service, int nargs, int nret, ...)
	struct prom_args args;
	args.service = ADDR(service);
	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		args.args[i] = va_arg(list, prom_arg_t);
	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;
	if (enter_prom(&args, RELOC(prom_entry)) < 0)
	return (nret > 0) ? args.args[nargs] : 0;
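/*
 * Editor-added illustration (not original code): a typical invocation opens a
 * package and receives its ihandle back as the single return value, as done
 * later in this file:
 *
 *	elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
 */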
static int __init call_prom_ret(const char *service, int nargs, int nret,
				prom_arg_t *rets, ...)
	struct prom_args args;
	args.service = ADDR(service);
	va_start(list, rets);
	for (i = 0; i < nargs; i++)
		args.args[i] = va_arg(list, prom_arg_t);
	for (i = 0; i < nret; i++)
		args.args[nargs+i] = 0;
	if (enter_prom(&args, RELOC(prom_entry)) < 0)
	for (i = 1; i < nret; ++i)
		rets[i-1] = args.args[nargs+i];
	return (nret > 0) ? args.args[nargs] : 0;
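/*
 * Editor-added illustration (not original code): with nret == 2 the first OF
 * return value becomes the function's return value and the second is stored
 * in rets[0], e.g. the "claim" call-method pattern used by prom_claim() below:
 *
 *	ret = call_prom_ret("call-method", 5, 2, &result,
 *			    ADDR("claim"), _prom->memory, align, size, virt);
 */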
static void __init prom_print(const char *msg)
	struct prom_t *_prom = &RELOC(prom);
	if (_prom->stdout == 0)
	for (p = msg; *p != 0; p = q) {
		for (q = p; *q != 0 && *q != '\n'; ++q)
		call_prom("write", 3, 1, _prom->stdout, p, q - p);
		call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);

static void __init prom_print_hex(unsigned long val)
	int i, nibbles = sizeof(val)*2;
	char buf[sizeof(val)*2+1];
	struct prom_t *_prom = &RELOC(prom);
	for (i = nibbles-1; i >= 0; i--) {
		buf[i] = (val & 0xf) + '0';
			buf[i] += ('a'-'0'-10);
	call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
static void __init prom_printf(const char *format, ...)
	const char *p, *q, *s;
	struct prom_t *_prom = &RELOC(prom);
	va_start(args, format);
	format = PTRRELOC(format);
	for (p = format; *p != 0; p = q) {
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
		call_prom("write", 3, 1, _prom->stdout, p, q - p);
			call_prom("write", 3, 1, _prom->stdout,
			s = va_arg(args, const char *);
			v = va_arg(args, unsigned long);
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
				      unsigned long align)
	struct prom_t *_prom = &RELOC(prom);
	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
		 * Old OF requires we claim physical and virtual separately
		 * and then map explicitly (assuming virtual mode)
		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), _prom->memory,
		if (ret != 0 || result == -1)
		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), _prom->mmumap,
			call_prom("call-method", 4, 1, ADDR("release"),
				  _prom->memory, size, virt);
		/* the 0x12 is M (coherence) + PP == read/write */
		call_prom("call-method", 6, 1,
			  ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
	reason = PTRRELOC(reason);
	/* ToDo: should put up an SRC here on p/iSeries */
	call_prom("exit", 0, 0);
	for (;;)			/* should never get here */

static int __init prom_next_node(phandle *nodep)
	if ((node = *nodep) != 0
	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
		if ((node = call_prom("parent", 1, 1, node)) == 0)
		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)

static int inline prom_getprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);

static int inline prom_getproplen(phandle node, const char *pname)
	return call_prom("getproplen", 2, 1, node, ADDR(pname));
static void add_string(char **str, const char *q)

static char *tohex(unsigned int x)
	static char digits[] = "0123456789abcdef";
	static char result[9];
		result[i] = digits[x & 0xf];
	} while (x != 0 && i > 0);

static int __init prom_setprop(phandle node, const char *nodename,
			       const char *pname, void *value, size_t valuelen)
	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
		return call_prom("setprop", 4, 1, node, ADDR(pname),
				 (u32)(unsigned long) value, (u32) valuelen);
	/* gah... setprop doesn't work on longtrail, have to use interpret */
	add_string(&p, "dev");
	add_string(&p, nodename);
	add_string(&p, tohex((u32)(unsigned long) value));
	add_string(&p, tohex(valuelen));
	add_string(&p, tohex(ADDR(pname)));
	add_string(&p, tohex(strlen(RELOC(pname))));
	add_string(&p, "property");
	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
/* We can't use the standard versions because of RELOC headaches. */
#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))

#define isdigit(c)	('0' <= (c) && (c) <= '9')
#define islower(c)	('a' <= (c) && (c) <= 'z')
#define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))

unsigned long prom_strtoul(const char *cp, const char **endp)
	unsigned long result = 0, base = 10, value;
		if (toupper(*cp) == 'X') {
	while (isxdigit(*cp) &&
	       (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
		result = result * base + value;
unsigned long prom_memparse(const char *ptr, const char **retptr)
	unsigned long ret = prom_strtoul(ptr, retptr);
	 * We can't use a switch here because GCC *may* generate a
	 * jump table which won't work, because we're not running at
	 * the address we're linked at.
	if ('G' == **retptr || 'g' == **retptr)
	if ('M' == **retptr || 'm' == **retptr)
	if ('K' == **retptr || 'k' == **retptr)
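	/*
	 * Editor's note (added): the G/M/K suffixes scale the parsed value by
	 * 2^30, 2^20 and 2^10 respectively, so e.g. "512M" parses to
	 * 0x20000000.
	 */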
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu
static void __init early_cmdline_parse(void)
	struct prom_t *_prom = &RELOC(prom);
	RELOC(prom_cmd_line[0]) = 0;
	p = RELOC(prom_cmd_line);
	if ((long)_prom->chosen > 0)
		l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
	if (l == 0) /* dbl check */
		strlcpy(RELOC(prom_cmd_line),
			RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
	prom_printf("command line: %s\n", RELOC(prom_cmd_line));
	opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
		prom_printf("iommu opt is: %s\n", opt);
		while (*opt && *opt == ' ')
		if (!strncmp(opt, RELOC("off"), 3))
			RELOC(ppc64_iommu_off) = 1;
		else if (!strncmp(opt, RELOC("force"), 5))
			RELOC(iommu_force_on) = 1;

	opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
		RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
		/* Align to 16 MB == size of ppc64 large page */
		RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
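		/*
		 * Editor's note (added): ALIGN() rounds up to the next 16MB
		 * boundary, so for example "mem=500M" (0x1f400000) becomes
		 * 512MB (0x20000000).
		 */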
 * crashkernel=size@addr specifies the location to reserve for
	opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
		RELOC(prom_crashk_size) =
			prom_memparse(opt, (const char **)&opt);
		if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
		    RELOC(prom_crashk_size)) {
			prom_printf("Warning: crashkernel size is not "
				    "aligned to 16MB\n");
		 * At present, the crash kernel always runs at 32MB.
		 * Just ignore whatever the user passed.
		RELOC(prom_crashk_base) = 0x2000000;
		prom_printf("Warning: PPC64 kdump kernel always runs "
#ifdef CONFIG_PPC_PSERIES
 * To tell the firmware what our capabilities are, we have to pass
 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
 * that contain structures that contain the actual values.
static struct fake_elf {
		char	name[8];	/* "PowerPC" */
		char	name[24];	/* "IBM,RPA-Client-Config" */
	.e_ident = { 0x7f, 'E', 'L', 'F',
		     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
	.e_type = ET_EXEC,	/* yeah right */
	.e_version = EV_CURRENT,
	.e_phoff = offsetof(struct fake_elf, phdr),
	.e_phentsize = sizeof(Elf32_Phdr),
		.p_offset = offsetof(struct fake_elf, chrpnote),
		.p_filesz = sizeof(struct chrpnote)
		.p_offset = offsetof(struct fake_elf, rpanote),
		.p_filesz = sizeof(struct rpanote)
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
			.real_mode = ~0U,	/* ~0 means "don't care" */
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.name = "IBM,RPA-Client-Config",
			.min_rmo_size = 64,	/* in megabytes */
			.min_rmo_percent = 0,
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
static void __init prom_send_capabilities(void)
	elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
	if (elfloader == 0) {
		prom_printf("couldn't open /packages/elf-loader\n");
	call_prom("call-method", 3, 1, ADDR("process-elf-header"),
		  elfloader, ADDR(&fake_elf));
	call_prom("close", 1, 0, elfloader);
 * Memory allocation strategy... our layout is normally:
 *
 * at 14MB or more we have vmlinux, then a gap and initrd. In some
 * rare cases, initrd might end up being before the kernel though.
 * We assume this won't overwrite the final kernel at 0; we have no
 * provision to handle that in this version, but it should hopefully
 * never happen.
 *
 * alloc_top is set to the top of RMO, eventually shrink down if the
 *
 * alloc_bottom is set to the top of kernel/initrd
 *
 * from there, allocations are done this way: rtas is allocated
 * topmost, and the device-tree is allocated from the bottom. We try
 * to grow the device-tree allocation as we progress. If we can't,
 * then we fail; we don't currently have a facility to restart
 * elsewhere, but that shouldn't be necessary.
 *
 * Note that calls to reserve_mem have to be done explicitly; memory
 * allocated with either alloc_up or alloc_down isn't automatically
 * reserved.
 * Allocates memory in the RMO upward from the kernel/initrd
 *
 * When align is 0, this is a special case: it means to allocate in place
 * at the current location of alloc_bottom or fail (that is basically
 * extending the previous allocation). Used for the device-tree flattening
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
	unsigned long base = RELOC(alloc_bottom);
	unsigned long addr = 0;

		base = _ALIGN_UP(base, align);
	prom_debug("alloc_up(%x, %x)\n", size, align);
	if (RELOC(ram_top) == 0)
		prom_panic("alloc_up() called with mem not initialized\n");

		base = _ALIGN_UP(RELOC(alloc_bottom), align);
		base = RELOC(alloc_bottom);

	for (; (base + size) <= RELOC(alloc_top);
	     base = _ALIGN_UP(base + 0x100000, align)) {
		prom_debug(" trying: 0x%x\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
	RELOC(alloc_bottom) = addr;

	prom_debug(" -> %x\n", addr);
	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
	prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
	prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
	prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
	prom_debug("  ram_top      : %x\n", RELOC(ram_top));
 * Allocates memory downward, either from top of RMO, or if highmem
 * is set, from the top of RAM. Note that this one doesn't handle
 * failures. It does claim memory if highmem is not set.
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
				       int highmem)
	unsigned long base, addr = 0;

	prom_debug("alloc_down(%x, %x, %s)\n", size, align,
		   highmem ? RELOC("(high)") : RELOC("(low)"));
	if (RELOC(ram_top) == 0)
		prom_panic("alloc_down() called with mem not initialized\n");

		/* Carve out storage for the TCE table. */
		addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
		if (addr <= RELOC(alloc_bottom))
		/* Will we bump into the RMO? If yes, check that we
		 * didn't overlap existing allocations there; if we did,
		 * we are dead: we must be the first in town!
		 */
		if (addr < RELOC(rmo_top)) {
			/* Good, we are first */
			if (RELOC(alloc_top) == RELOC(rmo_top))
				RELOC(alloc_top) = RELOC(rmo_top) = addr;
		RELOC(alloc_top_high) = addr;

	base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
	for (; base > RELOC(alloc_bottom);
	     base = _ALIGN_DOWN(base - 0x100000, align)) {
		prom_debug(" trying: 0x%x\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
	RELOC(alloc_top) = addr;

	prom_debug(" -> %x\n", addr);
	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
	prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
	prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
	prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
	prom_debug("  ram_top      : %x\n", RELOC(ram_top));
static unsigned long __init prom_next_cell(int s, cell_t **cellp)
	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {

 * Very dumb function for adding to the memory reserve list, but
 * we don't need anything smarter at this point
 *
 * XXX Eventually check for collisions. They should NEVER happen.
 * If problems seem to show up, it would be a good start to track
static void reserve_mem(u64 base, u64 size)
	u64 top = base + size;
	unsigned long cnt = RELOC(mem_reserve_cnt);

	/* We need to always keep one empty entry so that we
	 * have our terminator with "size" set to 0 since we are
	 * dumb and just copy this entire array to the boot params
	 */
	base = _ALIGN_DOWN(base, PAGE_SIZE);
	top = _ALIGN_UP(top, PAGE_SIZE);

	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
		prom_panic("Memory reserve map exhausted !\n");
	RELOC(mem_reserve_map)[cnt].base = base;
	RELOC(mem_reserve_map)[cnt].size = size;
	RELOC(mem_reserve_cnt) = cnt + 1;
 * Initialize the memory allocation mechanism, parse "memory" nodes and
 * obtain that way the top of memory and RMO to set up our local allocator
static void __init prom_init_mem(void)
	char *path, type[64];
	struct prom_t *_prom = &RELOC(prom);

	 * We iterate the memory nodes to find
	 * 1) top of RMO (first node)
	prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
	prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
	prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
	prom_debug("root_size_cells: %x\n", (unsigned long) rsc);

	prom_debug("scanning memory:\n");
	path = RELOC(prom_scratch);

	for (node = 0; prom_next_node(&node); ) {
		prom_getprop(node, "device_type", type, sizeof(type));

			 * CHRP Longtrail machines have no device_type
			 * on the memory node, so check the name instead...
			prom_getprop(node, "name", type, sizeof(type));
		if (strcmp(type, RELOC("memory")))

		plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
		if (plen > sizeof(regbuf)) {
			prom_printf("memory node too large for buffer !\n");
			plen = sizeof(regbuf);
		endp = p + (plen / sizeof(cell_t));

#ifdef DEBUG_PROM
		memset(path, 0, PROM_SCRATCH_SIZE);
		call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
		prom_debug("  node %s :\n", path);
#endif /* DEBUG_PROM */

		while ((endp - p) >= (rac + rsc)) {
			unsigned long base, size;

			base = prom_next_cell(rac, &p);
			size = prom_next_cell(rsc, &p);

			prom_debug("  %x %x\n", base, size);
			if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR))
				RELOC(rmo_top) = size;
			if ((base + size) > RELOC(ram_top))
				RELOC(ram_top) = base + size;
	RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);

	/* Check if we have an initrd after the kernel; if we do, move our bottom
	if (RELOC(prom_initrd_start)) {
		if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
			RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));

	 * If prom_memory_limit is set we reduce the upper limits *except* for
	 * alloc_top_high. This must be the real top of RAM so we can put
	RELOC(alloc_top_high) = RELOC(ram_top);

	if (RELOC(prom_memory_limit)) {
		if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
			prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
				    RELOC(prom_memory_limit));
			RELOC(prom_memory_limit) = 0;
		} else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
			prom_printf("Ignoring mem=%x >= ram_top.\n",
				    RELOC(prom_memory_limit));
			RELOC(prom_memory_limit) = 0;
			RELOC(ram_top) = RELOC(prom_memory_limit);
			RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));

	 * Set up our top alloc point, that is top of RMO or top of
	 * segment 0 when running non-LPAR.
	 * Some RS64 machines have buggy firmware where claims up at
	 * 1GB fail. Cap at 768MB as a workaround.
	 * Since 768MB is plenty of room, and we need to cap to something
	 * reasonable on 32-bit, cap at 768MB on all machines.
	if (!RELOC(rmo_top))
		RELOC(rmo_top) = RELOC(ram_top);
	RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
	RELOC(alloc_top) = RELOC(rmo_top);

	prom_printf("memory layout at init:\n");
	prom_printf("  memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
	prom_printf("  alloc_bottom : %x\n", RELOC(alloc_bottom));
	prom_printf("  alloc_top    : %x\n", RELOC(alloc_top));
	prom_printf("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
	prom_printf("  rmo_top      : %x\n", RELOC(rmo_top));
	prom_printf("  ram_top      : %x\n", RELOC(ram_top));
	if (RELOC(prom_crashk_base)) {
		prom_printf("  crashk_base  : %x\n", RELOC(prom_crashk_base));
		prom_printf("  crashk_size  : %x\n", RELOC(prom_crashk_size));
 * Allocate room for and instantiate RTAS
static void __init prom_instantiate_rtas(void)
	u32 base, entry = 0;

	prom_debug("prom_instantiate_rtas: start...\n");

	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	prom_debug("rtas_node: %x\n", rtas_node);
	if (!PHANDLE_VALID(rtas_node))

	prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));

	base = alloc_down(size, PAGE_SIZE, 0);
		prom_printf("RTAS allocation failed !\n");

	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
	if (!IHANDLE_VALID(rtas_inst)) {
		prom_printf("opening rtas package failed (%x)\n", rtas_inst);

	prom_printf("instantiating rtas at 0x%x ...", base);

	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"),
			  rtas_inst, base) != 0
		prom_printf(" failed\n");
	prom_printf(" done\n");

	reserve_mem(base, size);

	prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
		     &base, sizeof(base));
	prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
		     &entry, sizeof(entry));

	prom_debug("rtas base     = 0x%x\n", base);
	prom_debug("rtas entry    = 0x%x\n", entry);
	prom_debug("rtas size     = 0x%x\n", (long)size);

	prom_debug("prom_instantiate_rtas: end...\n");
 * Allocate room for and initialize TCE tables
static void __init prom_initialize_tce_table(void)
	char compatible[64], type[64], model[64];
	char *path = RELOC(prom_scratch);
	u32 minalign, minsize;
	u64 tce_entry, *tce_entryp;
	u64 local_alloc_top, local_alloc_bottom;

	if (RELOC(ppc64_iommu_off))

	prom_debug("starting prom_initialize_tce_table\n");

	/* Cache current top of allocs so we reserve a single block */
	local_alloc_top = RELOC(alloc_top_high);
	local_alloc_bottom = local_alloc_top;

	/* Search all nodes looking for PHBs. */
	for (node = 0; prom_next_node(&node); ) {
		prom_getprop(node, "compatible",
			     compatible, sizeof(compatible));
		prom_getprop(node, "device_type", type, sizeof(type));
		prom_getprop(node, "model", model, sizeof(model));

		if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
		/* Keep the old logic intact to avoid regressions. */
		if (compatible[0] != 0) {
			if ((strstr(compatible, RELOC("python")) == NULL) &&
			    (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
			    (strstr(compatible, RELOC("Winnipeg")) == NULL))
		} else if (model[0] != 0) {
			if ((strstr(model, RELOC("ython")) == NULL) &&
			    (strstr(model, RELOC("peedwagon")) == NULL) &&
			    (strstr(model, RELOC("innipeg")) == NULL))

		if (prom_getprop(node, "tce-table-minalign", &minalign,
				 sizeof(minalign)) == PROM_ERROR)
		if (prom_getprop(node, "tce-table-minsize", &minsize,
				 sizeof(minsize)) == PROM_ERROR)
			minsize = 4UL << 20;

		 * Even though we read what OF wants, we just set the table
		 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
		 * By doing this, we avoid the pitfalls of trying to DMA to
		 * MMIO space and the DMA alias hole.
		 *
		 * On POWER4, firmware sets the TCE region by assuming
		 * each TCE table is 8MB. Using this memory for anything
		 * else will impact performance, so we always allocate 8MB.
		if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
			minsize = 8UL << 20;
			minsize = 4UL << 20;

		/* Align to the greater of the align or size */
		align = max(minalign, minsize);
		base = alloc_down(minsize, align, 1);
			prom_panic("ERROR, cannot find space for TCE table.\n");
		if (base < local_alloc_bottom)
			local_alloc_bottom = base;
		/* It seems OF doesn't null-terminate the path :-( */
		memset(path, 0, PROM_SCRATCH_SIZE);
		/* Call OF to set up the TCE hardware */
		if (call_prom("package-to-path", 3, 1, node,
			      path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
			prom_printf("package-to-path failed\n");

		/* Save away the TCE table attributes for later use. */
		prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
		prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));

		prom_debug("TCE table: %s\n", path);
		prom_debug("\tnode = 0x%x\n", node);
		prom_debug("\tbase = 0x%x\n", base);
		prom_debug("\tsize = 0x%x\n", minsize);

		/* Initialize the table to have a one-to-one mapping
		 * over the allocated size.
		 */
		tce_entryp = (unsigned long *)base;
		for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
			tce_entry = (i << PAGE_SHIFT);
			*tce_entryp = tce_entry;

		prom_printf("opening PHB %s", path);
		phb_node = call_prom("open", 1, 1, path);
			prom_printf("... failed\n");
			prom_printf("... done\n");

		call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
			  phb_node, -1, minsize,
			  (u32) base, (u32) (base >> 32));
		call_prom("close", 1, 0, phb_node);
	reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);

	if (RELOC(prom_memory_limit)) {
		 * We align the start to a 16MB boundary so we can map
		 * the TCE area using large pages if possible.
		 * The end should be the top of RAM so no need to align it.
		RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
		RELOC(prom_tce_alloc_end) = local_alloc_top;

	/* Flag the first invalid entry */
	prom_debug("ending prom_initialize_tce_table\n");

 * With CHRP SMP we need to use the OF to start the other processors.
 * We can't wait until smp_boot_cpus (the OF is trashed by then)
 * so we have to put the processors into a holding pattern controlled
 * by the kernel (not OF) before we destroy the OF.
 *
 * This uses a chunk of low memory, puts some holding pattern
 * code there and sends the other processors off to there until
 * smp_boot_cpus tells them to do something. The holding pattern
 * checks that address until its cpu # is there; when it is, that
 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
 * of setting those values.
 *
 * We also use physical address 0x4 here to tell when a cpu
 * is in its holding pattern code.
extern void __secondary_hold(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;

 * We want to reference the copy of __secondary_hold_* in the
 * 0 - 0x100 address range
#define LOW_ADDR(x)	(((unsigned long) &(x)) & 0xff)
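/*
 * Editor's note (added): LOW_ADDR() yields the address of the copy of these
 * symbols that lives in the 0 - 0x100 region; prom_init() places that copy
 * there via copy_and_flush(0, KERNELBASE + offset, 0x100, 0) below.
 */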
static void __init prom_hold_cpus(void)
	unsigned int interrupt_server[MAX_CPU_THREADS];
	unsigned int cpu_threads, hw_cpu_num;
	struct prom_t *_prom = &RELOC(prom);
	unsigned long *spinloop
		= (void *) LOW_ADDR(__secondary_hold_spinloop);
	unsigned long *acknowledge
		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
#ifdef CONFIG_PPC64
	/* __secondary_hold is actually a descriptor, not the text address */
	unsigned long secondary_hold
		= __pa(*PTRRELOC((unsigned long *)__secondary_hold));
#else
	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
#endif

	prom_debug("prom_hold_cpus: start...\n");
	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
	prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
	prom_debug("    1) acknowledge    = 0x%x\n",
		   (unsigned long)acknowledge);
	prom_debug("    1) *acknowledge   = 0x%x\n", *acknowledge);
	prom_debug("    1) secondary_hold = 0x%x\n", secondary_hold);
	/* Set the common spinloop variable, so all of the secondary cpus
	 * will block when they are awakened from their OF spinloop.
	 * This must occur for both SMP and non SMP kernels, since OF will
	 * be trashed when we move the kernel.
	 */
	for (node = 0; prom_next_node(&node); ) {
		prom_getprop(node, "device_type", type, sizeof(type));
		if (strcmp(type, RELOC("cpu")) != 0)

		/* Skip non-configured cpus. */
		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
			if (strcmp(type, RELOC("okay")) != 0)

		prom_getprop(node, "reg", &reg, sizeof(reg));

		prom_debug("\ncpuid        = 0x%x\n", cpuid);
		prom_debug("cpu hw idx   = 0x%x\n", reg);

		/* Init the acknowledge var which will be reset by
		 * the secondary cpu when it awakens from its OF
		 */
		*acknowledge = (unsigned long)-1;

		propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
					sizeof(interrupt_server));
			/* no property.  old hardware has no SMT */
			interrupt_server[0] = reg; /* fake it with phys id */
			/* We have a threaded processor */
			cpu_threads = propsize / sizeof(u32);
			if (cpu_threads > MAX_CPU_THREADS) {
				prom_printf("SMT: too many threads!\n"
					    "SMT: found %x, max is %x\n",
					    cpu_threads, MAX_CPU_THREADS);
				cpu_threads = 1; /* ToDo: panic? */

		hw_cpu_num = interrupt_server[0];
		if (hw_cpu_num != _prom->cpu) {
			/* Primary Thread of non-boot cpu */
			prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
			call_prom("start-cpu", 3, 0, node,
				  secondary_hold, reg);

			for (i = 0; (i < 100000000) &&
			     (*acknowledge == ((unsigned long)-1)); i++)

			if (*acknowledge == reg)
				prom_printf("done\n");
				prom_printf("failed: %x\n", *acknowledge);
			prom_printf("%x : boot cpu %x\n", cpuid, reg);
#endif /* CONFIG_SMP */

		/* Reserve cpu #s for secondary threads.   They start later. */
		cpuid += cpu_threads;

	if (cpuid > NR_CPUS)
		prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
			    ") exceeded: ignoring extras\n");
	prom_debug("prom_hold_cpus: end...\n");

static void __init prom_init_client_services(unsigned long pp)
	struct prom_t *_prom = &RELOC(prom);

	/* Get a handle to the prom entry point before anything else */
	RELOC(prom_entry) = pp;

	/* get a handle for the stdout device */
	_prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
	if (!PHANDLE_VALID(_prom->chosen))
		prom_panic("cannot find chosen"); /* msg won't be printed :( */

	/* get device tree root */
	_prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(_prom->root))
		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
 * For really old powermacs, we need to map things we claim.
 * For that, we need the ihandle of the mmu.
 * Also, on the longtrail, we need to work around other bugs.
static void __init prom_find_mmu(void)
	struct prom_t *_prom = &RELOC(prom);

	oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
	if (!PHANDLE_VALID(oprom))
	if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
	version[sizeof(version) - 1] = 0;
	/* XXX might need to add other versions here */
	if (strcmp(version, "Open Firmware, 1.0.5") == 0)
		of_workarounds = OF_WA_CLAIM;
	else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
		of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
		call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
	_prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
	prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
		     sizeof(_prom->mmumap));
	if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
		of_workarounds &= ~OF_WA_CLAIM;		/* hmmm */
#define prom_find_mmu()

static void __init prom_init_stdout(void)
	struct prom_t *_prom = &RELOC(prom);
	char *path = RELOC(of_stdout_device);

	if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
		prom_panic("cannot find stdout");

	_prom->stdout = val;

	/* Get the full OF pathname of the stdout device */
	memset(path, 0, 256);
	call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
	val = call_prom("instance-to-package", 1, 1, _prom->stdout);
	prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
	prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
	prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
		     path, strlen(path) + 1);

	/* If it's a display, note it */
	memset(type, 0, sizeof(type));
	prom_getprop(val, "device_type", type, sizeof(type));
	if (strcmp(type, RELOC("display")) == 0)
		prom_setprop(val, path, "linux,boot-display", NULL, 0);

static void __init prom_close_stdin(void)
	struct prom_t *_prom = &RELOC(prom);

	if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
		call_prom("close", 1, 0, val);
static int __init prom_find_machine_type(void)
	struct prom_t *_prom = &RELOC(prom);

	len = prom_getprop(_prom->root, "compatible",
			   compat, sizeof(compat)-1);
		char *p = &compat[i];
		if (strstr(p, RELOC("Power Macintosh")) ||
		    strstr(p, RELOC("MacRISC")))
			return PLATFORM_POWERMAC;
		if (strstr(p, RELOC("Momentum,Maple")))
			return PLATFORM_MAPLE;
		if (strstr(p, RELOC("IBM,CPB")))
			return PLATFORM_CELL;

	/* Default to pSeries. We need to know if we are running LPAR */
	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (PHANDLE_VALID(rtas)) {
		int x = prom_getproplen(rtas, "ibm,hypertas-functions");
		if (x != PROM_ERROR) {
			prom_printf("Hypertas detected, assuming LPAR !\n");
			return PLATFORM_PSERIES_LPAR;
		return PLATFORM_PSERIES;
	return PLATFORM_CHRP;

static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
	return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
 * If we have a display that we don't know how to drive,
 * we will want to try to execute OF's open method for it
 * later. However, OF will probably fall over if we do that after
 * we've taken over the MMU.
 * So we check whether we will need to open the display,
 * and if so, open it now.
static void __init prom_check_displays(void)
	char type[16], *path;

	static unsigned char default_colors[] = {

	const unsigned char *clut;

	prom_printf("Looking for displays\n");
	for (node = 0; prom_next_node(&node); ) {
		memset(type, 0, sizeof(type));
		prom_getprop(node, "device_type", type, sizeof(type));
		if (strcmp(type, RELOC("display")) != 0)

		/* It seems OF doesn't null-terminate the path :-( */
		path = RELOC(prom_scratch);
		memset(path, 0, PROM_SCRATCH_SIZE);

		 * leave some room at the end of the path for appending extra
		if (call_prom("package-to-path", 3, 1, node, path,
			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
		prom_printf("found display   : %s, opening ... ", path);

		ih = call_prom("open", 1, 1, path);
			prom_printf("failed\n");

		prom_printf("done\n");
		prom_setprop(node, path, "linux,opened", NULL, 0);

		/* Set up a usable color table when the appropriate
		 * method is available. Should update this to set-colors */
		clut = RELOC(default_colors);
		for (i = 0; i < 32; i++, clut += 3)
			if (prom_set_color(ih, i, clut[0], clut[1],

#ifdef CONFIG_LOGO_LINUX_CLUT224
		clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
		for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
			if (prom_set_color(ih, i + 32, clut[0], clut[1],
#endif /* CONFIG_LOGO_LINUX_CLUT224 */
/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
			      unsigned long needed, unsigned long align)
	*mem_start = _ALIGN(*mem_start, align);
	while ((*mem_start + needed) > *mem_end) {
		unsigned long room, chunk;

		prom_debug("Chunk exhausted, claiming more at %x...\n",
			   RELOC(alloc_bottom));
		room = RELOC(alloc_top) - RELOC(alloc_bottom);
		if (room > DEVTREE_CHUNK_SIZE)
			room = DEVTREE_CHUNK_SIZE;
		if (room < PAGE_SIZE)
			prom_panic("No memory for flatten_device_tree (no room)");
		chunk = alloc_up(room, 0);
			prom_panic("No memory for flatten_device_tree (claim failed)");
		*mem_end = RELOC(alloc_top);

	ret = (void *)*mem_start;
	*mem_start += needed;

#define dt_push_token(token, mem_start, mem_end) \
	do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
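/*
 * Editor's note (added): in the flattened tree each property is emitted as
 * the token sequence OF_DT_PROP, <value length>, <name offset in the string
 * block>, followed by the value padded to a 4-byte boundary; see
 * scan_dt_build_struct() below.
 */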
static unsigned long __init dt_find_string(char *str)
	s = os = (char *)RELOC(dt_string_start);
	while (s < (char *)RELOC(dt_string_end)) {
		if (strcmp(s, str) == 0)

 * The Open Firmware 1275 specification states properties must be 31 bytes or
 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
#define MAX_PROPERTY_NAME 64
static void __init scan_dt_build_strings(phandle node,
					 unsigned long *mem_start,
					 unsigned long *mem_end)
	char *prev_name, *namep, *sstart;

	sstart = (char *)RELOC(dt_string_start);

	/* get and store all property names */
	prev_name = RELOC("");
		/* 64 is max len of name including nul. */
		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more nodes: unwind alloc */
			*mem_start = (unsigned long)namep;

		if (strcmp(namep, RELOC("name")) == 0) {
			*mem_start = (unsigned long)namep;
			prev_name = RELOC("name");

		/* get/create string entry */
		soff = dt_find_string(namep);
			*mem_start = (unsigned long)namep;
			namep = sstart + soff;
			/* Trim off some if we can */
			*mem_start = (unsigned long)namep + strlen(namep) + 1;
			RELOC(dt_string_end) = *mem_start;

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_strings(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned char *valp;
	static char pname[MAX_PROPERTY_NAME];

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	room = *mem_end - *mem_start;
	l = call_prom("package-to-path", 3, 1, node, namep, room);

	/* Didn't fit? Get more room. */
	if (l >= *mem_end - *mem_start)
		namep = make_room(mem_start, mem_end, l+1, 1);
		call_prom("package-to-path", 3, 1, node, namep, l);
	/* Fix up an Apple bug where they have bogus \0 chars in the
	 * middle of the path in some properties, and extract
	 * the unit name (everything after the last '/').
	 */
	for (lp = p = namep, ep = namep + l; p < ep; p++) {
	*mem_start = _ALIGN((unsigned long)lp + 1, 4);

	/* get it again for debugging */
	path = RELOC(prom_scratch);
	memset(path, 0, PROM_SCRATCH_SIZE);
	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);

	/* get and store all properties */
	prev_name = RELOC("");
	sstart = (char *)RELOC(dt_string_start);
		if (call_prom("nextprop", 3, 1, node, prev_name,

		if (strcmp(RELOC(pname), RELOC("name")) == 0) {
			prev_name = RELOC("name");

		/* find string offset */
		soff = dt_find_string(RELOC(pname));
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", RELOC(pname), path);
		prev_name = sstart + soff;

		l = call_prom("getproplen", 2, 1, node, RELOC(pname));

		if (l == PROM_ERROR)
		if (l > MAX_PROPERTY_LENGTH) {
			prom_printf("WARNING: ignoring large property ");
			/* It seems OF doesn't null-terminate the path :-( */
			prom_printf("[%s] ", path);
			prom_printf("%s length 0x%x\n", RELOC(pname), l);

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
		*mem_start = _ALIGN(*mem_start, 4);

	/* Add a "linux,phandle" property. */
	soff = dt_find_string(RELOC("linux,phandle"));
		prom_printf("WARNING: Can't find string index for"
			    " <linux-phandle> node %s\n", path);
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(4, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);
		valp = make_room(mem_start, mem_end, 4, 4);
		*(u32 *)valp = node;

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
static void __init flatten_device_tree(void)
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	struct prom_t *_prom = &RELOC(prom);

	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 4MB, as this is our "chunk" size
	room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = RELOC(alloc_top);

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = _ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end,
			sizeof(struct boot_param_header), 4);
	RELOC(dt_header_start) = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_string_start) = mem_start;
	mem_start += 4; /* hole */

	/* Add "linux,phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	strcpy(namep, RELOC("linux,phandle"));
	mem_start = (unsigned long)namep + strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	RELOC(dt_string_end) = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_struct_start) = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);

	hdr->boot_cpuid_phys = _prom->cpu;
	hdr->magic = OF_DT_HEADER;
	hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
	hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
	hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
	hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
	hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
	hdr->version = OF_DT_VERSION;
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = 0x10;
	/* Reserve the whole thing and copy the reserve map in; we
	 * also bump mem_reserve_cnt to cause further reservations to
	 * fail since it's too late.
	 */
	reserve_mem(RELOC(dt_header_start), hdr->totalsize);
	memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));

	prom_printf("reserved memory map:\n");
	for (i = 0; i < RELOC(mem_reserve_cnt); i++)
		prom_printf("  %x - %x\n",
			    RELOC(mem_reserve_map)[i].base,
			    RELOC(mem_reserve_map)[i].size);
	RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%x -> 0x%x\n",
		    RELOC(dt_string_start), RELOC(dt_string_end));
	prom_printf("Device tree struct  0x%x -> 0x%x\n",
		    RELOC(dt_struct_start), RELOC(dt_struct_end));
static void __init fixup_device_tree(void)
#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
	phandle u3, i2c, mpic;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	if (u3_rev < 0x35 || u3_rev > 0x39)
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
		     &interrupts, sizeof(interrupts));
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
		     &parent, sizeof(parent));
static void __init prom_find_boot_cpu(void)
	struct prom_t *_prom = &RELOC(prom);

	if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
	_prom->cpu = getprop_rval;

	prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);

static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
#ifdef CONFIG_BLK_DEV_INITRD
	struct prom_t *_prom = &RELOC(prom);

	if (r3 && r4 && r4 != 0xdeadbeef) {
		RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
		RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;

		val = RELOC(prom_initrd_start);
		prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
		val = RELOC(prom_initrd_end);
		prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",

		reserve_mem(RELOC(prom_initrd_start),
			    RELOC(prom_initrd_end) - RELOC(prom_initrd_start));

		prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
		prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
#endif /* CONFIG_BLK_DEV_INITRD */
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and managing the MMU hash table for us.
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7)
	struct prom_t *_prom;
	unsigned long offset = reloc_offset();

	_prom = &RELOC(prom);

	 * First zero the BSS
	memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);

	 * Init interface to Open Firmware, get some node references,
	prom_init_client_services(pp);

	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds

	 * Init prom stdout device

	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");

	 * Check for an initrd
	prom_check_initrd(r3, r4);

	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	RELOC(of_platform) = prom_find_machine_type();
	getprop_rval = RELOC(of_platform);
	prom_setprop(_prom->chosen, "/chosen", "linux,platform",
		     &getprop_rval, sizeof(getprop_rval));

#ifdef CONFIG_PPC_PSERIES
	 * On pSeries, inform the firmware about our capabilities
	if (RELOC(of_platform) == PLATFORM_PSERIES ||
	    RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();

	 * Copy the CPU hold code
	if (RELOC(of_platform) != PLATFORM_POWERMAC)
		copy_and_flush(0, KERNELBASE + offset, 0x100, 0);

	 * Do early parsing of command line
	early_cmdline_parse();

	 * Initialize memory management within prom_init

	if (RELOC(prom_crashk_base))
		reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));

	 * Determine which cpu is actually running right _now_
	prom_find_boot_cpu();

	 * Initialize display devices
	prom_check_displays();
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
	 * that uses the allocator; we need to make sure we get the top of memory
	 * available for us here...
	if (RELOC(of_platform) == PLATFORM_PSERIES)
		prom_initialize_tce_table();

	 * On non-powermacs, try to instantiate RTAS and put all CPUs
	 * in spin-loops. PowerMacs don't have a working RTAS and use
	 * a different way to spin CPUs
	if (RELOC(of_platform) != PLATFORM_POWERMAC) {
		prom_instantiate_rtas();
	 * Fill in some information for use by the kernel later on
	if (RELOC(prom_memory_limit))
		prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
			     &RELOC(prom_memory_limit),
			     sizeof(prom_memory_limit));

	if (RELOC(ppc64_iommu_off))
		prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",

	if (RELOC(iommu_force_on))
		prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",

	if (RELOC(prom_tce_alloc_start)) {
		prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
			     &RELOC(prom_tce_alloc_start),
			     sizeof(prom_tce_alloc_start));
		prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
			     &RELOC(prom_tce_alloc_end),
			     sizeof(prom_tce_alloc_end));

	if (RELOC(prom_crashk_base)) {
		prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
			     PTRRELOC(&prom_crashk_base),
			     sizeof(RELOC(prom_crashk_base)));
		prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
			     PTRRELOC(&prom_crashk_size),
			     sizeof(RELOC(prom_crashk_size)));

	 * Fixup any known bugs in the device-tree
	fixup_device_tree();

	 * Now finally create the flattened device-tree
	prom_printf("copying OF device tree ...\n");
	flatten_device_tree();
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101).
	if (RELOC(of_platform) != PLATFORM_POWERMAC)

	 * Call OF "quiesce" method to shut down pending DMAs from
	prom_printf("Calling quiesce ...\n");
	call_prom("quiesce", 0, 0);

	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	hdr = RELOC(dt_header_start);
	prom_printf("returning from prom_init\n");
	prom_debug("->dt_header_start=0x%x\n", hdr);

	reloc_got2(-offset);

	__start(hdr, KERNELBASE + offset, 0);