2 * Procedures for interfacing to Open Firmware.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/proc_fs.h>
28 #include <linux/stringify.h>
29 #include <linux/delay.h>
30 #include <linux/initrd.h>
31 #include <linux/bitops.h>
35 #include <asm/processor.h>
39 #include <asm/system.h>
41 #include <asm/pgtable.h>
43 #include <asm/iommu.h>
44 #include <asm/btext.h>
45 #include <asm/sections.h>
46 #include <asm/machdep.h>
48 #ifdef CONFIG_LOGO_LINUX_CLUT224
49 #include <linux/linux_logo.h>
50 extern const struct linux_logo logo_linux_clut224;
54 * Properties whose value is longer than this get excluded from our
55 * copy of the device tree. This value does need to be big enough to
56 * ensure that we don't lose things like the interrupt-map property
57 * on a PCI-PCI bridge.
59 #define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
62 * Size of each chunk we claim at a time for the flattened device tree. Eventually bump this one up.
64 #define DEVTREE_CHUNK_SIZE 0x100000
67 * This is the size of the local memory reserve map that gets copied
68 * into the boot params passed to the kernel. That size is totally
69 * flexible as the kernel just reads the list until it encounters an
70 entry with size 0, so it can be changed without breaking binary compatibility.
73 #define MEM_RESERVE_MAP_SIZE 8
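/*
 * Illustrative sketch (editor's note, not from the original source):
 * each entry is a { base, size } pair and the copy handed to the kernel
 * keeps a terminating entry whose size is 0, e.g.
 *
 *   mem_reserve_map[0] = { rtas_base,   rtas_size   };
 *   mem_reserve_map[1] = { initrd_base, initrd_size };
 *   mem_reserve_map[2] = { 0, 0 };    <- terminator the kernel stops at
 *
 * where rtas_base/initrd_base stand for whatever reserve_mem() recorded.
 */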
76 * prom_init() is called very early on, before the kernel text
77 * and data have been mapped to KERNELBASE. At this point the code
78 * is running at whatever address it has been loaded at.
79 * On ppc32 we compile with -mrelocatable, which means that references
80 * to extern and static variables get relocated automatically.
81 * On ppc64 we have to relocate the references explicitly with
82 * RELOC. (Note that strings count as static variables.)
84 * Because OF may have mapped I/O devices into the area starting at
85 * KERNELBASE, particularly on CHRP machines, we can't safely call
86 * OF once the kernel has been mapped to KERNELBASE. Therefore all
87 * OF calls must be done within prom_init().
89 * ADDR is used in calls to call_prom. The 4th and following
90 * arguments to call_prom should be 32-bit values.
91 * On ppc64, 64 bit values are truncated to 32 bits (and
92 * fortunately don't get interpreted as two arguments).
95 #define RELOC(x) (*PTRRELOC(&(x)))
96 #define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
97 #define OF_WORKAROUNDS 0
100 #define ADDR(x) (u32) (x)
101 #define OF_WORKAROUNDS of_workarounds
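/*
 * Hedged usage note: code in this file always goes through these macros
 * rather than touching statics or string literals directly, e.g.
 * RELOC(prom_entry) to read a static variable and ADDR("/chosen") to
 * pass a string's load-time address to call_prom().
 */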
105 #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
106 #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
108 #define PROM_BUG() do { \
109 prom_printf("kernel BUG at %s line 0x%x!\n", \
110 RELOC(__FILE__), __LINE__); \
111 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
115 #define prom_debug(x...) prom_printf(x)
117 #define prom_debug(x...)
121 typedef u32 prom_arg_t;
139 struct mem_map_entry {
146 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
149 extern int enter_prom(struct prom_args *args, unsigned long entry);
151 static inline int enter_prom(struct prom_args *args, unsigned long entry)
153 return ((int (*)(struct prom_args *))entry)(args);
157 extern void copy_and_flush(unsigned long dest, unsigned long src,
158 unsigned long size, unsigned long offset);
161 static struct prom_t __initdata prom;
163 static unsigned long prom_entry __initdata;
165 #define PROM_SCRATCH_SIZE 256
167 static char __initdata of_stdout_device[256];
168 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
170 static unsigned long __initdata dt_header_start;
171 static unsigned long __initdata dt_struct_start, dt_struct_end;
172 static unsigned long __initdata dt_string_start, dt_string_end;
174 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
177 static int __initdata iommu_force_on;
178 static int __initdata ppc64_iommu_off;
179 static unsigned long __initdata prom_tce_alloc_start;
180 static unsigned long __initdata prom_tce_alloc_end;
183 static int __initdata of_platform;
185 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
187 static unsigned long __initdata prom_memory_limit;
189 static unsigned long __initdata alloc_top;
190 static unsigned long __initdata alloc_top_high;
191 static unsigned long __initdata alloc_bottom;
192 static unsigned long __initdata rmo_top;
193 static unsigned long __initdata ram_top;
196 static unsigned long __initdata prom_crashk_base;
197 static unsigned long __initdata prom_crashk_size;
200 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
201 static int __initdata mem_reserve_cnt;
203 static cell_t __initdata regbuf[1024];
206 #define MAX_CPU_THREADS 2
212 unsigned int threadid;
213 } hmt_thread_data[NR_CPUS];
214 #endif /* CONFIG_HMT */
217 * Error results ... some OF calls will return "-1" on error, some
218 * will return 0, some will return either. To simplify, here are
219 macros to use with any ihandle or phandle return value to check if it is valid.
223 #define PROM_ERROR (-1u)
224 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
225 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
228 /* This is the one and *ONLY* place where we actually call open firmware.
232 static int __init call_prom(const char *service, int nargs, int nret, ...)
235 struct prom_args args;
238 args.service = ADDR(service);
242 va_start(list, nret);
243 for (i = 0; i < nargs; i++)
244 args.args[i] = va_arg(list, prom_arg_t);
247 for (i = 0; i < nret; i++)
248 args.args[nargs+i] = 0;
250 if (enter_prom(&args, RELOC(prom_entry)) < 0)
253 return (nret > 0) ? args.args[nargs] : 0;
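/*
 * Typical call, taken from the callers below: one input cell (the
 * address of the path string) and one returned cell (the phandle):
 *
 *   phandle chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
 */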
256 static int __init call_prom_ret(const char *service, int nargs, int nret,
257 prom_arg_t *rets, ...)
260 struct prom_args args;
263 args.service = ADDR(service);
267 va_start(list, rets);
268 for (i = 0; i < nargs; i++)
269 args.args[i] = va_arg(list, prom_arg_t);
272 for (i = 0; i < nret; i++)
273 args.args[nargs+i] = 0;
275 if (enter_prom(&args, RELOC(prom_entry)) < 0)
279 for (i = 1; i < nret; ++i)
280 rets[i-1] = args.args[nargs+i];
282 return (nret > 0) ? args.args[nargs] : 0;
286 static void __init prom_print(const char *msg)
289 struct prom_t *_prom = &RELOC(prom);
291 if (_prom->stdout == 0)
294 for (p = msg; *p != 0; p = q) {
295 for (q = p; *q != 0 && *q != '\n'; ++q)
298 call_prom("write", 3, 1, _prom->stdout, p, q - p);
302 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
307 static void __init prom_print_hex(unsigned long val)
309 int i, nibbles = sizeof(val)*2;
310 char buf[sizeof(val)*2+1];
311 struct prom_t *_prom = &RELOC(prom);
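/* Convert one nibble at a time, filling buf from the end backward so
 * the most significant digit ends up at buf[0]. */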
313 for (i = nibbles-1; i >= 0; i--) {
314 buf[i] = (val & 0xf) + '0';
316 buf[i] += ('a'-'0'-10);
320 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
324 static void __init prom_printf(const char *format, ...)
326 const char *p, *q, *s;
329 struct prom_t *_prom = &RELOC(prom);
331 va_start(args, format);
333 format = PTRRELOC(format);
335 for (p = format; *p != 0; p = q) {
336 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
339 call_prom("write", 3, 1, _prom->stdout, p, q - p);
344 call_prom("write", 3, 1, _prom->stdout,
354 s = va_arg(args, const char *);
359 v = va_arg(args, unsigned long);
367 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
370 struct prom_t *_prom = &RELOC(prom);
372 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
374 * Old OF requires we claim physical and virtual separately
375 * and then map explicitly (assuming virtual mode)
380 ret = call_prom_ret("call-method", 5, 2, &result,
381 ADDR("claim"), _prom->memory,
383 if (ret != 0 || result == -1)
385 ret = call_prom_ret("call-method", 5, 2, &result,
386 ADDR("claim"), _prom->mmumap,
389 call_prom("call-method", 4, 1, ADDR("release"),
390 _prom->memory, size, virt);
393 /* the 0x12 is M (coherence) + PP == read/write */
394 call_prom("call-method", 6, 1,
395 ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
398 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
402 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
405 reason = PTRRELOC(reason);
408 /* ToDo: should put up an SRC here on p/iSeries */
409 call_prom("exit", 0, 0);
411 for (;;) /* should never get here */
416 static int __init prom_next_node(phandle *nodep)
420 if ((node = *nodep) != 0
421 && (*nodep = call_prom("child", 1, 1, node)) != 0)
423 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
426 if ((node = call_prom("parent", 1, 1, node)) == 0)
428 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
433 static int inline prom_getprop(phandle node, const char *pname,
434 void *value, size_t valuelen)
436 return call_prom("getprop", 4, 1, node, ADDR(pname),
437 (u32)(unsigned long) value, (u32) valuelen);
440 static int inline prom_getproplen(phandle node, const char *pname)
442 return call_prom("getproplen", 2, 1, node, ADDR(pname));
445 static void add_string(char **str, const char *q)
455 static char *tohex(unsigned int x)
457 static char digits[] = "0123456789abcdef";
458 static char result[9];
465 result[i] = digits[x & 0xf];
467 } while (x != 0 && i > 0);
471 static int __init prom_setprop(phandle node, const char *nodename,
472 const char *pname, void *value, size_t valuelen)
476 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
477 return call_prom("setprop", 4, 1, node, ADDR(pname),
478 (u32)(unsigned long) value, (u32) valuelen);
480 /* gah... setprop doesn't work on longtrail, have to use interpret */
482 add_string(&p, "dev");
483 add_string(&p, nodename);
484 add_string(&p, tohex((u32)(unsigned long) value));
485 add_string(&p, tohex(valuelen));
486 add_string(&p, tohex(ADDR(pname)));
487 add_string(&p, tohex(strlen(RELOC(pname))));
488 add_string(&p, "property");
490 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
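/* The command interpreted above ends up looking roughly like
 *
 *   dev /chosen <value-addr> <value-len> <name-addr> <name-len> property
 *
 * i.e. select the node, then run the Forth "property" word with the
 * value and name passed as address/length pairs ("/chosen" is only an
 * example node here).
 */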
493 /* We can't use the standard versions because of RELOC headaches. */
494 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
495 || ('a' <= (c) && (c) <= 'f') \
496 || ('A' <= (c) && (c) <= 'F'))
498 #define isdigit(c) ('0' <= (c) && (c) <= '9')
499 #define islower(c) ('a' <= (c) && (c) <= 'z')
500 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
502 unsigned long prom_strtoul(const char *cp, const char **endp)
504 unsigned long result = 0, base = 10, value;
509 if (toupper(*cp) == 'X') {
515 while (isxdigit(*cp) &&
516 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
517 result = result * base + value;
527 unsigned long prom_memparse(const char *ptr, const char **retptr)
529 unsigned long ret = prom_strtoul(ptr, retptr);
533 * We can't use a switch here because GCC *may* generate a
534 * jump table which won't work, because we're not running at
535 * the address we're linked at.
537 if ('G' == **retptr || 'g' == **retptr)
540 if ('M' == **retptr || 'm' == **retptr)
543 if ('K' == **retptr || 'k' == **retptr)
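/* Worked example: "512M" parses to 512 << 20 == 0x20000000 and
 * "64K" to 64 << 10 == 0x10000. */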
555 * Early parsing of the command line passed to the kernel, used for
556 * "mem=x" and the options that affect the iommu
558 static void __init early_cmdline_parse(void)
560 struct prom_t *_prom = &RELOC(prom);
565 RELOC(prom_cmd_line[0]) = 0;
566 p = RELOC(prom_cmd_line);
567 if ((long)_prom->chosen > 0)
568 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
569 #ifdef CONFIG_CMDLINE
570 if (l == 0) /* dbl check */
571 strlcpy(RELOC(prom_cmd_line),
572 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
573 #endif /* CONFIG_CMDLINE */
574 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
577 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
579 prom_printf("iommu opt is: %s\n", opt);
581 while (*opt && *opt == ' ')
583 if (!strncmp(opt, RELOC("off"), 3))
584 RELOC(ppc64_iommu_off) = 1;
585 else if (!strncmp(opt, RELOC("force"), 5))
586 RELOC(iommu_force_on) = 1;
590 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
593 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
595 /* Align to 16 MB == size of ppc64 large page */
596 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
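/* e.g. mem=1000M is rounded up to the next 16MB boundary,
 * i.e. 1008MB (0x3F000000). */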
602 * crashkernel=size@addr specifies the location to reserve for
605 opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
608 RELOC(prom_crashk_size) =
609 prom_memparse(opt, (const char **)&opt);
611 if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
612 RELOC(prom_crashk_size)) {
613 prom_printf("Warning: crashkernel size is not "
614 "aligned to 16MB\n");
618 * At present, the crash kernel always runs at 32MB.
619 * Just ignore whatever the user passed.
621 RELOC(prom_crashk_base) = 0x2000000;
623 prom_printf("Warning: PPC64 kdump kernel always runs "
630 #ifdef CONFIG_PPC_PSERIES
632 * To tell the firmware what our capabilities are, we have to pass
633 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
634 * that contain structures that contain the actual values.
636 static struct fake_elf {
643 char name[8]; /* "PowerPC" */
657 char name[24]; /* "IBM,RPA-Client-Config" */
671 .e_ident = { 0x7f, 'E', 'L', 'F',
672 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
673 .e_type = ET_EXEC, /* yeah right */
675 .e_version = EV_CURRENT,
676 .e_phoff = offsetof(struct fake_elf, phdr),
677 .e_phentsize = sizeof(Elf32_Phdr),
683 .p_offset = offsetof(struct fake_elf, chrpnote),
684 .p_filesz = sizeof(struct chrpnote)
687 .p_offset = offsetof(struct fake_elf, rpanote),
688 .p_filesz = sizeof(struct rpanote)
692 .namesz = sizeof("PowerPC"),
693 .descsz = sizeof(struct chrpdesc),
697 .real_mode = ~0U, /* ~0 means "don't care" */
706 .namesz = sizeof("IBM,RPA-Client-Config"),
707 .descsz = sizeof(struct rpadesc),
709 .name = "IBM,RPA-Client-Config",
712 .min_rmo_size = 64, /* in megabytes */
713 .min_rmo_percent = 0,
714 .max_pft_size = 48, /* 2^48 bytes max PFT size */
722 static void __init prom_send_capabilities(void)
726 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
727 if (elfloader == 0) {
728 prom_printf("couldn't open /packages/elf-loader\n");
731 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
732 elfloader, ADDR(&fake_elf));
733 call_prom("close", 1, 0, elfloader);
738 * Memory allocation strategy... our layout is normally:
740 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
741 * rare cases, initrd might end up being before the kernel though.
742 * We assume this won't overwrite the final kernel at 0; we have no
743 * provision to handle that in this version, but it should hopefully never happen.
746 * alloc_top is set to the top of RMO, eventually shrunk down if the TCEs need it.
749 * alloc_bottom is set to the top of kernel/initrd
751 * from there, allocations are done this way: RTAS is allocated
752 * topmost, and the device-tree is allocated from the bottom. We try
753 * to grow the device-tree allocation as we progress. If we can't,
754 * then we fail; we don't currently have a facility to restart
755 * elsewhere, but that shouldn't be necessary.
757 * Note that calls to reserve_mem have to be done explicitly, memory
758 * allocated with either alloc_up or alloc_down isn't automatically reserved.
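/*
 * Rough picture of the layout managed below (editor's sketch, not to
 * scale, assuming the usual case of initrd above the kernel):
 *
 *   0 .. kernel .. initrd .. alloc_bottom -> (device-tree grows up)
 *             ... (RTAS etc. grow down) <- alloc_top == rmo_top
 *
 * alloc_top_high == ram_top and is only used for allocations allowed
 * above the RMO, such as the TCE tables allocated with "highmem" set.
 */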
764 * Allocates memory in the RMO upward from the kernel/initrd
766 * When align is 0, this is a special case: it means to allocate in place
767 * at the current location of alloc_bottom or fail (that is, basically
768 * extending the previous allocation). Used for the device-tree flattening.
770 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
772 unsigned long base = RELOC(alloc_bottom);
773 unsigned long addr = 0;
776 base = _ALIGN_UP(base, align);
777 prom_debug("alloc_up(%x, %x)\n", size, align);
778 if (RELOC(ram_top) == 0)
779 prom_panic("alloc_up() called with mem not initialized\n");
782 base = _ALIGN_UP(RELOC(alloc_bottom), align);
784 base = RELOC(alloc_bottom);
786 for(; (base + size) <= RELOC(alloc_top);
787 base = _ALIGN_UP(base + 0x100000, align)) {
788 prom_debug(" trying: 0x%x\n\r", base);
789 addr = (unsigned long)prom_claim(base, size, 0);
790 if (addr != PROM_ERROR && addr != 0)
798 RELOC(alloc_bottom) = addr;
800 prom_debug(" -> %x\n", addr);
801 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
802 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
803 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
804 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
805 prom_debug(" ram_top : %x\n", RELOC(ram_top));
811 * Allocates memory downward, either from top of RMO, or if highmem
812 * is set, from the top of RAM. Note that this one doesn't handle
813 * failures. It does claim memory if highmem is not set.
815 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
818 unsigned long base, addr = 0;
820 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
821 highmem ? RELOC("(high)") : RELOC("(low)"));
822 if (RELOC(ram_top) == 0)
823 prom_panic("alloc_down() called with mem not initialized\n");
826 /* Carve out storage for the TCE table. */
827 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
828 if (addr <= RELOC(alloc_bottom))
830 /* Will we bump into the RMO ? If yes, check that we
831 * didn't overlap existing allocations there; if we did,
832 * we are dead: we must be the first in town !
834 if (addr < RELOC(rmo_top)) {
835 /* Good, we are first */
836 if (RELOC(alloc_top) == RELOC(rmo_top))
837 RELOC(alloc_top) = RELOC(rmo_top) = addr;
841 RELOC(alloc_top_high) = addr;
845 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
846 for (; base > RELOC(alloc_bottom);
847 base = _ALIGN_DOWN(base - 0x100000, align)) {
848 prom_debug(" trying: 0x%x\n\r", base);
849 addr = (unsigned long)prom_claim(base, size, 0);
850 if (addr != PROM_ERROR && addr != 0)
856 RELOC(alloc_top) = addr;
859 prom_debug(" -> %x\n", addr);
860 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
861 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
862 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
863 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
864 prom_debug(" ram_top : %x\n", RELOC(ram_top));
872 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
877 /* Ignore more than 2 cells */
878 while (s > sizeof(unsigned long) / 4) {
894 * Very dumb function for adding to the memory reserve list, but
895 * we don't need anything smarter at this point
897 * XXX Eventually check for collisions. They should NEVER happen.
898 * If problems seem to show up, it would be a good start to track them down.
901 static void reserve_mem(u64 base, u64 size)
903 u64 top = base + size;
904 unsigned long cnt = RELOC(mem_reserve_cnt);
909 /* We need to always keep one empty entry so that we
910 * have our terminator with "size" set to 0 since we are
911 * dumb and just copy this entire array to the boot params
913 base = _ALIGN_DOWN(base, PAGE_SIZE);
914 top = _ALIGN_UP(top, PAGE_SIZE);
917 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
918 prom_panic("Memory reserve map exhausted !\n");
919 RELOC(mem_reserve_map)[cnt].base = base;
920 RELOC(mem_reserve_map)[cnt].size = size;
921 RELOC(mem_reserve_cnt) = cnt + 1;
925 * Initialize the memory allocation mechanism, parse "memory" nodes and
926 * obtain that way the top of memory and RMO to set up our local allocator.
928 static void __init prom_init_mem(void)
931 char *path, type[64];
934 struct prom_t *_prom = &RELOC(prom);
938 * We iterate the memory nodes to find
939 * 1) top of RMO (first node), and 2) top of memory (ram_top)
943 prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
945 prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
946 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
947 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
949 prom_debug("scanning memory:\n");
950 path = RELOC(prom_scratch);
952 for (node = 0; prom_next_node(&node); ) {
954 prom_getprop(node, "device_type", type, sizeof(type));
958 * CHRP Longtrail machines have no device_type
959 * on the memory node, so check the name instead...
961 prom_getprop(node, "name", type, sizeof(type));
963 if (strcmp(type, RELOC("memory")))
966 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
967 if (plen > sizeof(regbuf)) {
968 prom_printf("memory node too large for buffer !\n");
969 plen = sizeof(regbuf);
972 endp = p + (plen / sizeof(cell_t));
975 memset(path, 0, PROM_SCRATCH_SIZE);
976 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
977 prom_debug(" node %s :\n", path);
978 #endif /* DEBUG_PROM */
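/* Each "reg" entry is #address-cells cells of base followed by
 * #size-cells cells of size; walk every entry in the property. */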
980 while ((endp - p) >= (rac + rsc)) {
981 unsigned long base, size;
983 base = prom_next_cell(rac, &p);
984 size = prom_next_cell(rsc, &p);
988 prom_debug(" %x %x\n", base, size);
990 RELOC(rmo_top) = size;
991 if ((base + size) > RELOC(ram_top))
992 RELOC(ram_top) = base + size;
996 RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
998 /* Check if we have an initrd after the kernel; if we do, move our bottom point to after it.
1001 if (RELOC(prom_initrd_start)) {
1002 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
1003 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
1007 * If prom_memory_limit is set we reduce the upper limits *except* for
1008 * alloc_top_high. This must be the real top of RAM so we can put TCEs up there.
1012 RELOC(alloc_top_high) = RELOC(ram_top);
1014 if (RELOC(prom_memory_limit)) {
1015 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
1016 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1017 RELOC(prom_memory_limit));
1018 RELOC(prom_memory_limit) = 0;
1019 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
1020 prom_printf("Ignoring mem=%x >= ram_top.\n",
1021 RELOC(prom_memory_limit));
1022 RELOC(prom_memory_limit) = 0;
1024 RELOC(ram_top) = RELOC(prom_memory_limit);
1025 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
1030 * Setup our top alloc point, that is top of RMO or top of
1031 * segment 0 when running non-LPAR.
1032 * Some RS64 machines have buggy firmware where claims up at
1033 * 1GB fail. Cap at 768MB as a workaround.
1034 * Since 768MB is plenty of room, and we need to cap to something
1035 * reasonable on 32-bit, cap at 768MB on all machines.
1037 if (!RELOC(rmo_top))
1038 RELOC(rmo_top) = RELOC(ram_top);
1039 RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
1040 RELOC(alloc_top) = RELOC(rmo_top);
1042 prom_printf("memory layout at init:\n");
1043 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
1044 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
1045 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
1046 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
1047 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
1048 prom_printf(" ram_top : %x\n", RELOC(ram_top));
1050 if (RELOC(prom_crashk_base)) {
1051 prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
1052 prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
1059 * Allocate room for and instantiate RTAS
1061 static void __init prom_instantiate_rtas(void)
1065 u32 base, entry = 0;
1068 prom_debug("prom_instantiate_rtas: start...\n");
1070 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1071 prom_debug("rtas_node: %x\n", rtas_node);
1072 if (!PHANDLE_VALID(rtas_node))
1075 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
1079 base = alloc_down(size, PAGE_SIZE, 0);
1081 prom_printf("RTAS allocation failed !\n");
1085 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1086 if (!IHANDLE_VALID(rtas_inst)) {
1087 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1091 prom_printf("instantiating rtas at 0x%x ...", base);
1093 if (call_prom_ret("call-method", 3, 2, &entry,
1094 ADDR("instantiate-rtas"),
1095 rtas_inst, base) != 0
1097 prom_printf(" failed\n");
1100 prom_printf(" done\n");
1102 reserve_mem(base, size);
1104 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1105 &base, sizeof(base));
1106 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1107 &entry, sizeof(entry));
1109 prom_debug("rtas base = 0x%x\n", base);
1110 prom_debug("rtas entry = 0x%x\n", entry);
1111 prom_debug("rtas size = 0x%x\n", (long)size);
1113 prom_debug("prom_instantiate_rtas: end...\n");
1118 * Allocate room for and initialize TCE tables
1120 static void __init prom_initialize_tce_table(void)
1124 char compatible[64], type[64], model[64];
1125 char *path = RELOC(prom_scratch);
1127 u32 minalign, minsize;
1128 u64 tce_entry, *tce_entryp;
1129 u64 local_alloc_top, local_alloc_bottom;
1132 if (RELOC(ppc64_iommu_off))
1135 prom_debug("starting prom_initialize_tce_table\n");
1137 /* Cache current top of allocs so we reserve a single block */
1138 local_alloc_top = RELOC(alloc_top_high);
1139 local_alloc_bottom = local_alloc_top;
1141 /* Search all nodes looking for PHBs. */
1142 for (node = 0; prom_next_node(&node); ) {
1146 prom_getprop(node, "compatible",
1147 compatible, sizeof(compatible));
1148 prom_getprop(node, "device_type", type, sizeof(type));
1149 prom_getprop(node, "model", model, sizeof(model));
1151 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1154 /* Keep the old logic intact to avoid regression. */
1155 if (compatible[0] != 0) {
1156 if ((strstr(compatible, RELOC("python")) == NULL) &&
1157 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1158 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1160 } else if (model[0] != 0) {
1161 if ((strstr(model, RELOC("ython")) == NULL) &&
1162 (strstr(model, RELOC("peedwagon")) == NULL) &&
1163 (strstr(model, RELOC("innipeg")) == NULL))
1167 if (prom_getprop(node, "tce-table-minalign", &minalign,
1168 sizeof(minalign)) == PROM_ERROR)
1170 if (prom_getprop(node, "tce-table-minsize", &minsize,
1171 sizeof(minsize)) == PROM_ERROR)
1172 minsize = 4UL << 20;
1175 * Even though we read what OF wants, we just set the table
1176 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1177 * By doing this, we avoid the pitfalls of trying to DMA to
1178 * MMIO space and the DMA alias hole.
1180 * On POWER4, firmware sets the TCE region by assuming
1181 * each TCE table is 8MB. Using this memory for anything
1182 * else will impact performance, so we always allocate 8MB.
1185 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1186 minsize = 8UL << 20;
1188 minsize = 4UL << 20;
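/* Arithmetic behind the comment above: a 4MB table holds
 * 4MB / 8 bytes = 512K TCEs, each mapping one 4KB page,
 * i.e. 512K * 4KB = 2GB of DMA space. */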
1190 /* Align to the greater of the align or size */
1191 align = max(minalign, minsize);
1192 base = alloc_down(minsize, align, 1);
1194 prom_panic("ERROR, cannot find space for TCE table.\n");
1195 if (base < local_alloc_bottom)
1196 local_alloc_bottom = base;
1198 /* It seems OF doesn't null-terminate the path :-( */
1199 memset(path, 0, PROM_SCRATCH_SIZE);
1200 /* Call OF to setup the TCE hardware */
1201 if (call_prom("package-to-path", 3, 1, node,
1202 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1203 prom_printf("package-to-path failed\n");
1206 /* Save away the TCE table attributes for later use. */
1207 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1208 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1210 prom_debug("TCE table: %s\n", path);
1211 prom_debug("\tnode = 0x%x\n", node);
1212 prom_debug("\tbase = 0x%x\n", base);
1213 prom_debug("\tsize = 0x%x\n", minsize);
1215 /* Initialize the table to have a one-to-one mapping
1216 * over the allocated size.
1218 tce_entryp = (unsigned long *)base;
1219 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1220 tce_entry = (i << PAGE_SHIFT);
1222 *tce_entryp = tce_entry;
1225 prom_printf("opening PHB %s", path);
1226 phb_node = call_prom("open", 1, 1, path);
1228 prom_printf("... failed\n");
1230 prom_printf("... done\n");
1232 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1233 phb_node, -1, minsize,
1234 (u32) base, (u32) (base >> 32));
1235 call_prom("close", 1, 0, phb_node);
1238 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1240 if (RELOC(prom_memory_limit)) {
1242 * We align the start to a 16MB boundary so we can map
1243 * the TCE area using large pages if possible.
1244 * The end should be the top of RAM so no need to align it.
1246 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1248 RELOC(prom_tce_alloc_end) = local_alloc_top;
1251 /* Flag the first invalid entry */
1252 prom_debug("ending prom_initialize_tce_table\n");
1257 * With CHRP SMP we need to use the OF to start the other processors.
1258 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1259 * so we have to put the processors into a holding pattern controlled
1260 * by the kernel (not OF) before we destroy the OF.
1262 * This uses a chunk of low memory, puts some holding pattern
1263 * code there and sends the other processors off to there until
1264 * smp_boot_cpus tells them to do something. The holding pattern
1265 * checks that address until its cpu # is there; when it is, that
1266 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1267 * of setting those values.
1269 * We also use physical address 0x4 here to tell when a cpu
1270 * is in its holding pattern code.
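/*
 * Sketch of the hand-off as implemented below: prom_hold_cpus() resets
 * __secondary_hold_acknowledge to -1, asks OF to "start-cpu" each
 * secondary at __secondary_hold, then polls until the secondary writes
 * its hw cpu id into the acknowledge word and sits spinning on
 * __secondary_hold_spinloop until the kernel releases it.
 */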
1274 extern void __secondary_hold(void);
1275 extern unsigned long __secondary_hold_spinloop;
1276 extern unsigned long __secondary_hold_acknowledge;
1279 * We want to reference the copy of __secondary_hold_* in the
1280 * 0 - 0x100 address range
1282 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1284 static void __init prom_hold_cpus(void)
1291 unsigned int interrupt_server[MAX_CPU_THREADS];
1292 unsigned int cpu_threads, hw_cpu_num;
1294 struct prom_t *_prom = &RELOC(prom);
1295 unsigned long *spinloop
1296 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1297 unsigned long *acknowledge
1298 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1300 /* __secondary_hold is actually a descriptor, not the text address */
1301 unsigned long secondary_hold
1302 = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
1304 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1307 prom_debug("prom_hold_cpus: start...\n");
1308 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1309 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1310 prom_debug(" 1) acknowledge = 0x%x\n",
1311 (unsigned long)acknowledge);
1312 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1313 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1315 /* Set the common spinloop variable, so all of the secondary cpus
1316 * will block when they are awakened from their OF spinloop.
1317 * This must occur for both SMP and non SMP kernels, since OF will
1318 * be trashed when we move the kernel.
1323 for (i = 0; i < NR_CPUS; i++)
1324 RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
1327 for (node = 0; prom_next_node(&node); ) {
1329 prom_getprop(node, "device_type", type, sizeof(type));
1330 if (strcmp(type, RELOC("cpu")) != 0)
1333 /* Skip non-configured cpus. */
1334 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1335 if (strcmp(type, RELOC("okay")) != 0)
1339 prom_getprop(node, "reg", &reg, sizeof(reg));
1341 prom_debug("\ncpuid = 0x%x\n", cpuid);
1342 prom_debug("cpu hw idx = 0x%x\n", reg);
1344 /* Init the acknowledge var which will be reset by
1345 * the secondary cpu when it awakens from its OF spinloop.
1348 *acknowledge = (unsigned long)-1;
1350 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
1352 sizeof(interrupt_server));
1354 /* no property. old hardware has no SMT */
1356 interrupt_server[0] = reg; /* fake it with phys id */
1358 /* We have a threaded processor */
1359 cpu_threads = propsize / sizeof(u32);
1360 if (cpu_threads > MAX_CPU_THREADS) {
1361 prom_printf("SMT: too many threads!\n"
1362 "SMT: found %x, max is %x\n",
1363 cpu_threads, MAX_CPU_THREADS);
1364 cpu_threads = 1; /* ToDo: panic? */
1368 hw_cpu_num = interrupt_server[0];
1369 if (hw_cpu_num != _prom->cpu) {
1370 /* Primary Thread of non-boot cpu */
1371 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
1372 call_prom("start-cpu", 3, 0, node,
1373 secondary_hold, reg);
1375 for (i = 0; (i < 100000000) &&
1376 (*acknowledge == ((unsigned long)-1)); i++ )
1379 if (*acknowledge == reg)
1380 prom_printf("done\n");
1382 prom_printf("failed: %x\n", *acknowledge);
1386 prom_printf("%x : boot cpu %x\n", cpuid, reg);
1387 #endif /* CONFIG_SMP */
1389 /* Reserve cpu #s for secondary threads. They start later. */
1390 cpuid += cpu_threads;
1393 /* Only enable HMT on processors that provide support. */
1394 if (__is_processor(PV_PULSAR) ||
1395 __is_processor(PV_ICESTAR) ||
1396 __is_processor(PV_SSTAR)) {
1397 prom_printf(" starting secondary threads\n");
1399 for (i = 0; i < NR_CPUS; i += 2) {
1404 unsigned long pir = mfspr(SPRN_PIR);
1405 if (__is_processor(PV_PULSAR)) {
1406 RELOC(hmt_thread_data)[i].pir =
1409 RELOC(hmt_thread_data)[i].pir =
1415 prom_printf("Processor is not HMT capable\n");
1419 if (cpuid > NR_CPUS)
1420 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1421 ") exceeded: ignoring extras\n");
1423 prom_debug("prom_hold_cpus: end...\n");
1427 static void __init prom_init_client_services(unsigned long pp)
1429 struct prom_t *_prom = &RELOC(prom);
1431 /* Get a handle to the prom entry point before anything else */
1432 RELOC(prom_entry) = pp;
1434 /* get a handle for the stdout device */
1435 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1436 if (!PHANDLE_VALID(_prom->chosen))
1437 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1439 /* get device tree root */
1440 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1441 if (!PHANDLE_VALID(_prom->root))
1442 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1449 * For really old powermacs, we need to map things we claim.
1450 * For that, we need the ihandle of the mmu.
1451 * Also, on the longtrail, we need to work around other bugs.
1453 static void __init prom_find_mmu(void)
1455 struct prom_t *_prom = &RELOC(prom);
1459 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1460 if (!PHANDLE_VALID(oprom))
1462 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1464 version[sizeof(version) - 1] = 0;
1465 /* XXX might need to add other versions here */
1466 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1467 of_workarounds = OF_WA_CLAIM;
1468 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1469 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1470 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1473 _prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
1474 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1475 sizeof(_prom->mmumap));
1476 if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
1477 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1480 #define prom_find_mmu()
1483 static void __init prom_init_stdout(void)
1485 struct prom_t *_prom = &RELOC(prom);
1486 char *path = RELOC(of_stdout_device);
1490 if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
1491 prom_panic("cannot find stdout");
1493 _prom->stdout = val;
1495 /* Get the full OF pathname of the stdout device */
1496 memset(path, 0, 256);
1497 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1498 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1499 prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
1501 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1502 prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
1503 path, strlen(path) + 1);
1505 /* If it's a display, note it */
1506 memset(type, 0, sizeof(type));
1507 prom_getprop(val, "device_type", type, sizeof(type));
1508 if (strcmp(type, RELOC("display")) == 0)
1509 prom_setprop(val, path, "linux,boot-display", NULL, 0);
1512 static void __init prom_close_stdin(void)
1514 struct prom_t *_prom = &RELOC(prom);
1517 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1518 call_prom("close", 1, 0, val);
1521 static int __init prom_find_machine_type(void)
1523 struct prom_t *_prom = &RELOC(prom);
1529 len = prom_getprop(_prom->root, "compatible",
1530 compat, sizeof(compat)-1);
1534 char *p = &compat[i];
1538 if (strstr(p, RELOC("Power Macintosh")) ||
1539 strstr(p, RELOC("MacRISC")))
1540 return PLATFORM_POWERMAC;
1542 if (strstr(p, RELOC("Momentum,Maple")))
1543 return PLATFORM_MAPLE;
1544 if (strstr(p, RELOC("IBM,CPB")))
1545 return PLATFORM_CELL;
1551 /* Default to pSeries. We need to know if we are running LPAR */
1552 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1553 if (PHANDLE_VALID(rtas)) {
1554 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1555 if (x != PROM_ERROR) {
1556 prom_printf("Hypertas detected, assuming LPAR !\n");
1557 return PLATFORM_PSERIES_LPAR;
1560 return PLATFORM_PSERIES;
1562 return PLATFORM_CHRP;
1566 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1568 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1572 * If we have a display that we don't know how to drive,
1573 * we will want to try to execute OF's open method for it
1574 * later. However, OF will probably fall over if we do that after
1575 * we've taken over the MMU.
1576 * So we check whether we will need to open the display,
1577 * and if so, open it now.
1579 static void __init prom_check_displays(void)
1581 char type[16], *path;
1586 static unsigned char default_colors[] = {
1604 const unsigned char *clut;
1606 prom_printf("Looking for displays\n");
1607 for (node = 0; prom_next_node(&node); ) {
1608 memset(type, 0, sizeof(type));
1609 prom_getprop(node, "device_type", type, sizeof(type));
1610 if (strcmp(type, RELOC("display")) != 0)
1613 /* It seems OF doesn't null-terminate the path :-( */
1614 path = RELOC(prom_scratch);
1615 memset(path, 0, PROM_SCRATCH_SIZE);
1618 * leave some room at the end of the path for appending extra arguments.
1621 if (call_prom("package-to-path", 3, 1, node, path,
1622 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1624 prom_printf("found display : %s, opening ... ", path);
1626 ih = call_prom("open", 1, 1, path);
1628 prom_printf("failed\n");
1633 prom_printf("done\n");
1634 prom_setprop(node, path, "linux,opened", NULL, 0);
1636 /* Setup a usable color table when the appropriate
1637 * method is available. Should update this to set-colors */
1638 clut = RELOC(default_colors);
1639 for (i = 0; i < 32; i++, clut += 3)
1640 if (prom_set_color(ih, i, clut[0], clut[1],
1644 #ifdef CONFIG_LOGO_LINUX_CLUT224
1645 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1646 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1647 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1650 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
1655 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1656 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1657 unsigned long needed, unsigned long align)
1661 *mem_start = _ALIGN(*mem_start, align);
1662 while ((*mem_start + needed) > *mem_end) {
1663 unsigned long room, chunk;
1665 prom_debug("Chunk exhausted, claiming more at %x...\n",
1666 RELOC(alloc_bottom));
1667 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1668 if (room > DEVTREE_CHUNK_SIZE)
1669 room = DEVTREE_CHUNK_SIZE;
1670 if (room < PAGE_SIZE)
1671 prom_panic("No memory for flatten_device_tree (no room)");
1672 chunk = alloc_up(room, 0);
1674 prom_panic("No memory for flatten_device_tree (claim failed)");
1675 *mem_end = RELOC(alloc_top);
1678 ret = (void *)*mem_start;
1679 *mem_start += needed;
1684 #define dt_push_token(token, mem_start, mem_end) \
1685 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
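/* dt_find_string(): look str up in the strings block accumulated so
 * far; returns its offset from dt_string_start, or 0 if it isn't there
 * yet (offset 0 is never a real string thanks to the 4-byte hole left
 * at dt_string_start). */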
1687 static unsigned long __init dt_find_string(char *str)
1691 s = os = (char *)RELOC(dt_string_start);
1693 while (s < (char *)RELOC(dt_string_end)) {
1694 if (strcmp(s, str) == 0)
1702 * The Open Firmware 1275 specification states property names must be 31 bytes
1703 * or less; however, not all firmwares obey this. Make it 64 bytes to be safe.
1705 #define MAX_PROPERTY_NAME 64
1707 static void __init scan_dt_build_strings(phandle node,
1708 unsigned long *mem_start,
1709 unsigned long *mem_end)
1711 char *prev_name, *namep, *sstart;
1715 sstart = (char *)RELOC(dt_string_start);
1717 /* get and store all property names */
1718 prev_name = RELOC("");
1720 /* 64 is max len of name including nul. */
1721 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1722 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1723 /* No more properties: unwind alloc */
1724 *mem_start = (unsigned long)namep;
1729 if (strcmp(namep, RELOC("name")) == 0) {
1730 *mem_start = (unsigned long)namep;
1731 prev_name = RELOC("name");
1734 /* get/create string entry */
1735 soff = dt_find_string(namep);
1737 *mem_start = (unsigned long)namep;
1738 namep = sstart + soff;
1740 /* Trim off some if we can */
1741 *mem_start = (unsigned long)namep + strlen(namep) + 1;
1742 RELOC(dt_string_end) = *mem_start;
1747 /* do all our children */
1748 child = call_prom("child", 1, 1, node);
1749 while (child != 0) {
1750 scan_dt_build_strings(child, mem_start, mem_end);
1751 child = call_prom("peer", 1, 1, child);
1755 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1756 unsigned long *mem_end)
1759 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1761 unsigned char *valp;
1762 static char pname[MAX_PROPERTY_NAME];
1765 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1767 /* get the node's full name */
1768 namep = (char *)*mem_start;
1769 room = *mem_end - *mem_start;
1772 l = call_prom("package-to-path", 3, 1, node, namep, room);
1774 /* Didn't fit? Get more room. */
1776 if (l >= *mem_end - *mem_start)
1777 namep = make_room(mem_start, mem_end, l+1, 1);
1778 call_prom("package-to-path", 3, 1, node, namep, l);
1782 /* Fixup an Apple bug where they have bogus \0 chars in the
1783 * middle of the path in some properties, and extract
1784 * the unit name (everything after the last '/').
1786 for (lp = p = namep, ep = namep + l; p < ep; p++) {
1793 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
1796 /* get it again for debugging */
1797 path = RELOC(prom_scratch);
1798 memset(path, 0, PROM_SCRATCH_SIZE);
1799 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1801 /* get and store all properties */
1802 prev_name = RELOC("");
1803 sstart = (char *)RELOC(dt_string_start);
1805 if (call_prom("nextprop", 3, 1, node, prev_name,
1810 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1811 prev_name = RELOC("name");
1815 /* find string offset */
1816 soff = dt_find_string(RELOC(pname));
1818 prom_printf("WARNING: Can't find string index for"
1819 " <%s>, node %s\n", RELOC(pname), path);
1822 prev_name = sstart + soff;
1825 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1828 if (l == PROM_ERROR)
1830 if (l > MAX_PROPERTY_LENGTH) {
1831 prom_printf("WARNING: ignoring large property ");
1832 /* It seems OF doesn't null-terminate the path :-( */
1833 prom_printf("[%s] ", path);
1834 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1838 /* push property head */
1839 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1840 dt_push_token(l, mem_start, mem_end);
1841 dt_push_token(soff, mem_start, mem_end);
1843 /* push property content */
1844 valp = make_room(mem_start, mem_end, l, 4);
1845 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1846 *mem_start = _ALIGN(*mem_start, 4);
1849 /* Add a "linux,phandle" property. */
1850 soff = dt_find_string(RELOC("linux,phandle"));
1852 prom_printf("WARNING: Can't find string index for"
1853 " <linux-phandle> node %s\n", path);
1855 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1856 dt_push_token(4, mem_start, mem_end);
1857 dt_push_token(soff, mem_start, mem_end);
1858 valp = make_room(mem_start, mem_end, 4, 4);
1859 *(u32 *)valp = node;
1862 /* do all our children */
1863 child = call_prom("child", 1, 1, node);
1864 while (child != 0) {
1865 scan_dt_build_struct(child, mem_start, mem_end);
1866 child = call_prom("peer", 1, 1, child);
1869 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
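/*
 * flatten_device_tree() below lays the blob out roughly as:
 *
 *   boot_param_header | mem reserve map | strings block | structure block
 *
 * and then fills in the header offsets (off_dt_struct, off_dt_strings,
 * off_mem_rsvmap) relative to dt_header_start once everything is placed.
 */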
1872 static void __init flatten_device_tree(void)
1875 unsigned long mem_start, mem_end, room;
1876 struct boot_param_header *hdr;
1877 struct prom_t *_prom = &RELOC(prom);
1882 * Check how much room we have between alloc top & bottom (+/- a
1883 * few pages), crop to DEVTREE_CHUNK_SIZE, as this is our "chunk" size
1885 room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
1886 if (room > DEVTREE_CHUNK_SIZE)
1887 room = DEVTREE_CHUNK_SIZE;
1888 prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
1890 /* Now try to claim that */
1891 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
1893 prom_panic("Can't allocate initial device-tree chunk\n");
1894 mem_end = RELOC(alloc_top);
1896 /* Get root of tree */
1897 root = call_prom("peer", 1, 1, (phandle)0);
1898 if (root == (phandle)0)
1899 prom_panic ("couldn't get device tree root\n");
1901 /* Build header and make room for mem rsv map */
1902 mem_start = _ALIGN(mem_start, 4);
1903 hdr = make_room(&mem_start, &mem_end,
1904 sizeof(struct boot_param_header), 4);
1905 RELOC(dt_header_start) = (unsigned long)hdr;
1906 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1908 /* Start of strings */
1909 mem_start = PAGE_ALIGN(mem_start);
1910 RELOC(dt_string_start) = mem_start;
1911 mem_start += 4; /* hole */
1913 /* Add "linux,phandle" in there, we'll need it */
1914 namep = make_room(&mem_start, &mem_end, 16, 1);
1915 strcpy(namep, RELOC("linux,phandle"));
1916 mem_start = (unsigned long)namep + strlen(namep) + 1;
1918 /* Build string array */
1919 prom_printf("Building dt strings...\n");
1920 scan_dt_build_strings(root, &mem_start, &mem_end);
1921 RELOC(dt_string_end) = mem_start;
1923 /* Build structure */
1924 mem_start = PAGE_ALIGN(mem_start);
1925 RELOC(dt_struct_start) = mem_start;
1926 prom_printf("Building dt structure...\n");
1927 scan_dt_build_struct(root, &mem_start, &mem_end);
1928 dt_push_token(OF_DT_END, &mem_start, &mem_end);
1929 RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
1932 hdr->boot_cpuid_phys = _prom->cpu;
1933 hdr->magic = OF_DT_HEADER;
1934 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1935 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1936 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1937 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1938 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1939 hdr->version = OF_DT_VERSION;
1940 /* Version 16 is not backward compatible */
1941 hdr->last_comp_version = 0x10;
1943 /* Reserve the whole thing and copy the reserve map in, we
1944 * also bump mem_reserve_cnt to cause further reservations to
1945 * fail since it's too late.
1947 reserve_mem(RELOC(dt_header_start), hdr->totalsize);
1948 memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
1953 prom_printf("reserved memory map:\n");
1954 for (i = 0; i < RELOC(mem_reserve_cnt); i++)
1955 prom_printf(" %x - %x\n",
1956 RELOC(mem_reserve_map)[i].base,
1957 RELOC(mem_reserve_map)[i].size);
1960 RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
1962 prom_printf("Device tree strings 0x%x -> 0x%x\n",
1963 RELOC(dt_string_start), RELOC(dt_string_end));
1964 prom_printf("Device tree struct 0x%x -> 0x%x\n",
1965 RELOC(dt_struct_start), RELOC(dt_struct_end));
1970 static void __init fixup_device_tree(void)
1972 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
1973 phandle u3, i2c, mpic;
1978 /* Some G5s have a missing interrupt definition, fix it up here */
1979 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
1980 if (!PHANDLE_VALID(u3))
1982 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
1983 if (!PHANDLE_VALID(i2c))
1985 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
1986 if (!PHANDLE_VALID(mpic))
1989 /* check if proper rev of u3 */
1990 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
1993 if (u3_rev < 0x35 || u3_rev > 0x39)
1995 /* does it need fixup ? */
1996 if (prom_getproplen(i2c, "interrupts") > 0)
1999 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2001 /* interrupt on this revision of u3 is number 0 and level */
2004 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2005 &interrupts, sizeof(interrupts));
2007 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2008 &parent, sizeof(parent));
2013 static void __init prom_find_boot_cpu(void)
2015 struct prom_t *_prom = &RELOC(prom);
2021 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
2024 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2026 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
2027 _prom->cpu = getprop_rval;
2029 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
2032 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2034 #ifdef CONFIG_BLK_DEV_INITRD
2035 struct prom_t *_prom = &RELOC(prom);
2037 if (r3 && r4 && r4 != 0xdeadbeef) {
2040 RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
2041 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
2043 val = RELOC(prom_initrd_start);
2044 prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
2046 val = RELOC(prom_initrd_end);
2047 prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
2050 reserve_mem(RELOC(prom_initrd_start),
2051 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
2053 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
2054 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
2056 #endif /* CONFIG_BLK_DEV_INITRD */
2060 * We enter here early on, when the Open Firmware prom is still
2061 * handling exceptions and the MMU hash table for us.
2064 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2066 unsigned long r6, unsigned long r7)
2068 struct prom_t *_prom;
2071 unsigned long offset = reloc_offset();
2077 _prom = &RELOC(prom);
2080 * First zero the BSS
2082 memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
2085 * Init interface to Open Firmware, get some node references,
2088 prom_init_client_services(pp);
2091 * See if this OF is old enough that we need to do explicit maps
2092 * and other workarounds
2097 * Init prom stdout device
2101 /* Bail if this is a kdump kernel. */
2102 if (PHYSICAL_START > 0)
2103 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2106 * Check for an initrd
2108 prom_check_initrd(r3, r4);
2111 * Get default machine type. At this point, we do not differentiate
2112 * between pSeries SMP and pSeries LPAR
2114 RELOC(of_platform) = prom_find_machine_type();
2115 getprop_rval = RELOC(of_platform);
2116 prom_setprop(_prom->chosen, "/chosen", "linux,platform",
2117 &getprop_rval, sizeof(getprop_rval));
2119 #ifdef CONFIG_PPC_PSERIES
2121 * On pSeries, inform the firmware about our capabilities
2123 if (RELOC(of_platform) == PLATFORM_PSERIES ||
2124 RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
2125 prom_send_capabilities();
2129 * Copy the CPU hold code
2131 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2132 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2135 * Do early parsing of command line
2137 early_cmdline_parse();
2140 * Initialize memory management within prom_init
2145 if (RELOC(prom_crashk_base))
2146 reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
2149 * Determine which cpu is actually running right _now_
2151 prom_find_boot_cpu();
2154 * Initialize display devices
2156 prom_check_displays();
2160 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2161 * that uses the allocator, we need to make sure we get the top of memory
2162 * available for us here...
2164 if (RELOC(of_platform) == PLATFORM_PSERIES)
2165 prom_initialize_tce_table();
2169 * On non-powermacs, try to instantiate RTAS and put all CPUs
2170 * in spin-loops. PowerMacs don't have a working RTAS and use
2171 * a different way to spin CPUs
2173 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
2174 prom_instantiate_rtas();
2179 * Fill in some info for use by the kernel later on
2181 if (RELOC(prom_memory_limit))
2182 prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
2183 &RELOC(prom_memory_limit),
2184 sizeof(prom_memory_limit));
2186 if (RELOC(ppc64_iommu_off))
2187 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
2190 if (RELOC(iommu_force_on))
2191 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
2194 if (RELOC(prom_tce_alloc_start)) {
2195 prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
2196 &RELOC(prom_tce_alloc_start),
2197 sizeof(prom_tce_alloc_start));
2198 prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
2199 &RELOC(prom_tce_alloc_end),
2200 sizeof(prom_tce_alloc_end));
2205 if (RELOC(prom_crashk_base)) {
2206 prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
2207 PTRRELOC(&prom_crashk_base),
2208 sizeof(RELOC(prom_crashk_base)));
2209 prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
2210 PTRRELOC(&prom_crashk_size),
2211 sizeof(RELOC(prom_crashk_size)));
2215 * Fixup any known bugs in the device-tree
2217 fixup_device_tree();
2220 * Now finally create the flattened device-tree
2222 prom_printf("copying OF device tree ...\n");
2223 flatten_device_tree();
2226 * in case stdin is USB and still active on IBM machines...
2227 * Unfortunately quiesce crashes on some powermacs if we have
2228 * closed stdin already (in particular the powerbook 101).
2230 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2234 * Call OF "quiesce" method to shut down pending DMAs from devices, etc.
2237 prom_printf("Calling quiesce ...\n");
2238 call_prom("quiesce", 0, 0);
2241 * And finally, call the kernel passing it the flattened device
2242 * tree and NULL as r5, thus triggering the new entry point which
2243 * is common to us and kexec
2245 hdr = RELOC(dt_header_start);
2246 prom_printf("returning from prom_init\n");
2247 prom_debug("->dt_header_start=0x%x\n", hdr);
2250 reloc_got2(-offset);
2253 __start(hdr, KERNELBASE + offset, 0);