2 * Procedures for interfacing to Open Firmware.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/proc_fs.h>
28 #include <linux/stringify.h>
29 #include <linux/delay.h>
30 #include <linux/initrd.h>
31 #include <linux/bitops.h>
35 #include <asm/processor.h>
39 #include <asm/system.h>
41 #include <asm/pgtable.h>
43 #include <asm/iommu.h>
44 #include <asm/btext.h>
45 #include <asm/sections.h>
46 #include <asm/machdep.h>
48 #ifdef CONFIG_LOGO_LINUX_CLUT224
49 #include <linux/linux_logo.h>
50 extern const struct linux_logo logo_linux_clut224;
54 * Properties whose value is longer than this get excluded from our
55 * copy of the device tree. This value does need to be big enough to
56 * ensure that we don't lose things like the interrupt-map property
57 * on a PCI-PCI bridge.
59 #define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
62 * Eventually bump that one up
64 #define DEVTREE_CHUNK_SIZE 0x100000
67 * This is the size of the local memory reserve map that gets copied
68 * into the boot params passed to the kernel. That size is totally
69 * flexible as the kernel just reads the list until it encounters an
70 * entry with size 0, so it can be changed without breaking binary compatibility.
73 #define MEM_RESERVE_MAP_SIZE 8
76 * prom_init() is called very early on, before the kernel text
77 * and data have been mapped to KERNELBASE. At this point the code
78 * is running at whatever address it has been loaded at.
79 * On ppc32 we compile with -mrelocatable, which means that references
80 * to extern and static variables get relocated automatically.
81 * On ppc64 we have to relocate the references explicitly with
82 * RELOC. (Note that strings count as static variables.)
84 * Because OF may have mapped I/O devices into the area starting at
85 * KERNELBASE, particularly on CHRP machines, we can't safely call
86 * OF once the kernel has been mapped to KERNELBASE. Therefore all
87 * OF calls must be done within prom_init().
89 * ADDR is used in calls to call_prom. The 4th and following
90 * arguments to call_prom should be 32-bit values.
91 * On ppc64, 64 bit values are truncated to 32 bits (and
92 * fortunately don't get interpreted as two arguments).
95 #define RELOC(x) (*PTRRELOC(&(x)))
96 #define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
99 #define ADDR(x) (u32) (x)
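/*
 * Illustrative uses (both taken from calls further down in this file):
 *
 *	prom_printf("command line: %s\n", RELOC(prom_cmd_line));
 *	call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
 *
 * i.e. RELOC() is used whenever a global variable is accessed, and
 * ADDR() turns a pointer (including string literals) into a 32-bit
 * cell suitable as a call_prom() argument.
 */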
102 #define PROM_BUG() do { \
103 prom_printf("kernel BUG at %s line 0x%x!\n", \
104 RELOC(__FILE__), __LINE__); \
105 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
109 #define prom_debug(x...) prom_printf(x)
111 #define prom_debug(x...)
115 #define PLATFORM_POWERMAC _MACH_Pmac
116 #define PLATFORM_CHRP _MACH_chrp
120 typedef u32 prom_arg_t;
137 struct mem_map_entry {
144 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
147 extern int enter_prom(struct prom_args *args, unsigned long entry);
149 static inline int enter_prom(struct prom_args *args, unsigned long entry)
151 return ((int (*)(struct prom_args *))entry)(args);
155 extern void copy_and_flush(unsigned long dest, unsigned long src,
156 unsigned long size, unsigned long offset);
159 static struct prom_t __initdata prom;
161 static unsigned long prom_entry __initdata;
163 #define PROM_SCRATCH_SIZE 256
165 static char __initdata of_stdout_device[256];
166 static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
168 static unsigned long __initdata dt_header_start;
169 static unsigned long __initdata dt_struct_start, dt_struct_end;
170 static unsigned long __initdata dt_string_start, dt_string_end;
172 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
175 static int __initdata iommu_force_on;
176 static int __initdata ppc64_iommu_off;
177 static unsigned long __initdata prom_tce_alloc_start;
178 static unsigned long __initdata prom_tce_alloc_end;
181 static int __initdata of_platform;
183 static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185 static unsigned long __initdata prom_memory_limit;
187 static unsigned long __initdata alloc_top;
188 static unsigned long __initdata alloc_top_high;
189 static unsigned long __initdata alloc_bottom;
190 static unsigned long __initdata rmo_top;
191 static unsigned long __initdata ram_top;
193 static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
194 static int __initdata mem_reserve_cnt;
196 static cell_t __initdata regbuf[1024];
199 #define MAX_CPU_THREADS 2
205 unsigned int threadid;
206 } hmt_thread_data[NR_CPUS];
207 #endif /* CONFIG_HMT */
210 * Error results ... some OF calls will return "-1" on error, some
211 * will return 0, some will return either. To simplify, here are
212 * macros to use with any ihandle or phandle return value to check if it is valid.
216 #define PROM_ERROR (-1u)
217 #define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
218 #define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
221 /* This is the one and *ONLY* place where we actually call open firmware. */
225 static int __init call_prom(const char *service, int nargs, int nret, ...)
228 struct prom_args args;
231 args.service = ADDR(service);
235 va_start(list, nret);
236 for (i = 0; i < nargs; i++)
237 args.args[i] = va_arg(list, prom_arg_t);
240 for (i = 0; i < nret; i++)
241 args.args[nargs+i] = 0;
243 if (enter_prom(&args, RELOC(prom_entry)) < 0)
246 return (nret > 0) ? args.args[nargs] : 0;
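/*
 * Same as call_prom(), except that any return values beyond the first
 * are copied out through the rets[] array (rets[0] receives the second
 * return value, and so on); the first return value is still the
 * function result.
 */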
249 static int __init call_prom_ret(const char *service, int nargs, int nret,
250 prom_arg_t *rets, ...)
253 struct prom_args args;
256 args.service = ADDR(service);
260 va_start(list, rets);
261 for (i = 0; i < nargs; i++)
262 args.args[i] = va_arg(list, prom_arg_t);
265 for (i = 0; i < nret; i++)
268 if (enter_prom(&args, RELOC(prom_entry)) < 0)
272 for (i = 1; i < nret; ++i)
273 rets[i-1] = args.args[nargs+i];
275 return (nret > 0) ? args.args[nargs] : 0;
279 static void __init prom_print(const char *msg)
282 struct prom_t *_prom = &RELOC(prom);
284 if (_prom->stdout == 0)
287 for (p = msg; *p != 0; p = q) {
288 for (q = p; *q != 0 && *q != '\n'; ++q)
291 call_prom("write", 3, 1, _prom->stdout, p, q - p);
295 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
300 static void __init prom_print_hex(unsigned long val)
302 int i, nibbles = sizeof(val)*2;
303 char buf[sizeof(val)*2+1];
304 struct prom_t *_prom = &RELOC(prom);
306 for (i = nibbles-1; i >= 0; i--) {
307 buf[i] = (val & 0xf) + '0';
309 buf[i] += ('a'-'0'-10);
313 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
317 static void __init prom_printf(const char *format, ...)
319 const char *p, *q, *s;
322 struct prom_t *_prom = &RELOC(prom);
324 va_start(args, format);
326 format = PTRRELOC(format);
328 for (p = format; *p != 0; p = q) {
329 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
332 call_prom("write", 3, 1, _prom->stdout, p, q - p);
337 call_prom("write", 3, 1, _prom->stdout,
347 s = va_arg(args, const char *);
352 v = va_arg(args, unsigned long);
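/*
 * Claim a chunk of memory from Open Firmware. On old PowerMacs that
 * expose an MMU package (see prom_find_mmu() later in this file), we
 * also have to ask OF to map the claimed range 1:1 before we can
 * touch it.
 */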
360 static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
364 struct prom_t *_prom = &RELOC(prom);
366 ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
368 if (ret != -1 && _prom->mmumap != 0)
369 /* old pmacs need us to map as well */
370 call_prom("call-method", 6, 1,
371 ADDR("map"), _prom->mmumap, 0, size, virt, virt);
375 static void __init __attribute__((noreturn)) prom_panic(const char *reason)
378 reason = PTRRELOC(reason);
381 /* ToDo: should put up an SRC here on p/iSeries */
382 call_prom("exit", 0, 0);
384 for (;;) /* should never get here */
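/*
 * Depth-first walk helper: advance *nodep to its first child if it has
 * one, otherwise to its next peer, otherwise to the next peer of the
 * nearest ancestor that has one. Returns non-zero while there are
 * still nodes left to visit.
 */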
389 static int __init prom_next_node(phandle *nodep)
393 if ((node = *nodep) != 0
394 && (*nodep = call_prom("child", 1, 1, node)) != 0)
396 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
399 if ((node = call_prom("parent", 1, 1, node)) == 0)
401 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
406 static int __init prom_getprop(phandle node, const char *pname,
407 void *value, size_t valuelen)
409 return call_prom("getprop", 4, 1, node, ADDR(pname),
410 (u32)(unsigned long) value, (u32) valuelen);
413 static int __init prom_getproplen(phandle node, const char *pname)
415 return call_prom("getproplen", 2, 1, node, ADDR(pname));
418 static int __init prom_setprop(phandle node, const char *pname,
419 void *value, size_t valuelen)
421 return call_prom("setprop", 4, 1, node, ADDR(pname),
422 (u32)(unsigned long) value, (u32) valuelen);
425 /* We can't use the standard versions because of RELOC headaches. */
426 #define isxdigit(c) (('0' <= (c) && (c) <= '9') \
427 || ('a' <= (c) && (c) <= 'f') \
428 || ('A' <= (c) && (c) <= 'F'))
430 #define isdigit(c) ('0' <= (c) && (c) <= '9')
431 #define islower(c) ('a' <= (c) && (c) <= 'z')
432 #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
434 unsigned long prom_strtoul(const char *cp, const char **endp)
436 unsigned long result = 0, base = 10, value;
441 if (toupper(*cp) == 'X') {
447 while (isxdigit(*cp) &&
448 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
449 result = result * base + value;
459 unsigned long prom_memparse(const char *ptr, const char **retptr)
461 unsigned long ret = prom_strtoul(ptr, retptr);
465 * We can't use a switch here because GCC *may* generate a
466 * jump table which won't work, because we're not running at
467 * the address we're linked at.
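/*
 * For reference, the suffixes scale the value as usual: "512M" gives
 * 512 << 20 bytes, "2G" gives 2 << 30, "64K" gives 64 << 10.
 */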
469 if ('G' == **retptr || 'g' == **retptr)
472 if ('M' == **retptr || 'm' == **retptr)
475 if ('K' == **retptr || 'k' == **retptr)
487 * Early parsing of the command line passed to the kernel, used for
488 * "mem=x" and the options that affect the iommu
490 static void __init early_cmdline_parse(void)
492 struct prom_t *_prom = &RELOC(prom);
496 RELOC(prom_cmd_line[0]) = 0;
497 p = RELOC(prom_cmd_line);
498 if ((long)_prom->chosen > 0)
499 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
500 #ifdef CONFIG_CMDLINE
501 if (l == 0) /* dbl check */
502 strlcpy(RELOC(prom_cmd_line),
503 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
504 #endif /* CONFIG_CMDLINE */
505 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
508 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
510 prom_printf("iommu opt is: %s\n", opt);
512 while (*opt && *opt == ' ')
514 if (!strncmp(opt, RELOC("off"), 3))
515 RELOC(ppc64_iommu_off) = 1;
516 else if (!strncmp(opt, RELOC("force"), 5))
517 RELOC(iommu_force_on) = 1;
521 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
524 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
526 /* Align to 16 MB == size of ppc64 large page */
527 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
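/*
 * For example, booting with "mem=768M" leaves prom_memory_limit at
 * 0x30000000; prom_init_mem() below then caps ram_top (and rmo_top)
 * at that value, provided the machine actually has more memory than
 * that.
 */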
532 #ifdef CONFIG_PPC_PSERIES
534 * To tell the firmware what our capabilities are, we have to pass
535 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
536 * that contain the structures holding the actual values.
538 static struct fake_elf {
545 char name[8]; /* "PowerPC" */
559 char name[24]; /* "IBM,RPA-Client-Config" */
573 .e_ident = { 0x7f, 'E', 'L', 'F',
574 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
575 .e_type = ET_EXEC, /* yeah right */
577 .e_version = EV_CURRENT,
578 .e_phoff = offsetof(struct fake_elf, phdr),
579 .e_phentsize = sizeof(Elf32_Phdr),
585 .p_offset = offsetof(struct fake_elf, chrpnote),
586 .p_filesz = sizeof(struct chrpnote)
589 .p_offset = offsetof(struct fake_elf, rpanote),
590 .p_filesz = sizeof(struct rpanote)
594 .namesz = sizeof("PowerPC"),
595 .descsz = sizeof(struct chrpdesc),
599 .real_mode = ~0U, /* ~0 means "don't care" */
608 .namesz = sizeof("IBM,RPA-Client-Config"),
609 .descsz = sizeof(struct rpadesc),
611 .name = "IBM,RPA-Client-Config",
614 .min_rmo_size = 64, /* in megabytes */
615 .min_rmo_percent = 0,
616 .max_pft_size = 48, /* 2^48 bytes max PFT size */
624 static void __init prom_send_capabilities(void)
628 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
629 if (elfloader == 0) {
630 prom_printf("couldn't open /packages/elf-loader\n");
633 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
634 elfloader, ADDR(&fake_elf));
635 call_prom("close", 1, 0, elfloader);
640 * Memory allocation strategy... our layout is normally:
642 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
643 * rare cases, initrd might end up being before the kernel though.
644 * We assume this won't overwrite the final kernel at 0; we have no
645 * provision to handle that in this version, but it should hopefully never happen.
648 * alloc_top is set to the top of RMO, and may shrink later if TCE tables need space below it
651 * alloc_bottom is set to the top of kernel/initrd
653 * from there, allocations are done this way: rtas is allocated
654 * topmost, and the device-tree is allocated from the bottom. We try
655 * to grow the device-tree allocation as we progress. If we can't,
656 * then we fail, we don't currently have a facility to restart
657 * elsewhere, but that shouldn't be necessary.
659 * Note that calls to reserve_mem have to be done explicitly, memory
660 * allocated with either alloc_up or alloc_down isn't automatically reserved.
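 *
 * A rough picture of the layout this produces (addresses are
 * illustrative, not exact):
 *
 *	0		kernel image (initrd normally above it)
 *	alloc_bottom	device-tree and other alloc_up() users grow up
 *	alloc_top	RTAS and other alloc_down() users grow down
 *	rmo_top		top of the Real Mode Area (capped at 768MB)
 *	ram_top		top of RAM (= alloc_top_high, used for TCE tables)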
666 * Allocates memory in the RMO upward from the kernel/initrd
668 * When align is 0, this is a special case, it means to allocate in place
669 * at the current location of alloc_bottom or fail (that is basically
670 * extending the previous allocation). Used for the device-tree flattening
672 static unsigned long __init alloc_up(unsigned long size, unsigned long align)
674 unsigned long base = RELOC(alloc_bottom);
675 unsigned long addr = 0;
678 base = _ALIGN_UP(base, align);
679 prom_debug("alloc_up(%x, %x)\n", size, align);
680 if (RELOC(ram_top) == 0)
681 prom_panic("alloc_up() called with mem not initialized\n");
684 base = _ALIGN_UP(RELOC(alloc_bottom), align);
686 base = RELOC(alloc_bottom);
688 for(; (base + size) <= RELOC(alloc_top);
689 base = _ALIGN_UP(base + 0x100000, align)) {
690 prom_debug(" trying: 0x%x\n\r", base);
691 addr = (unsigned long)prom_claim(base, size, 0);
692 if (addr != PROM_ERROR && addr != 0)
700 RELOC(alloc_bottom) = addr;
702 prom_debug(" -> %x\n", addr);
703 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
704 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
705 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
706 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
707 prom_debug(" ram_top : %x\n", RELOC(ram_top));
713 * Allocates memory downward, either from top of RMO, or if highmem
714 * is set, from the top of RAM. Note that this one doesn't handle
715 * failures. It does claim memory if highmem is not set.
717 static unsigned long __init alloc_down(unsigned long size, unsigned long align,
720 unsigned long base, addr = 0;
722 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
723 highmem ? RELOC("(high)") : RELOC("(low)"));
724 if (RELOC(ram_top) == 0)
725 prom_panic("alloc_down() called with mem not initialized\n");
728 /* Carve out storage for the TCE table. */
729 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
730 if (addr <= RELOC(alloc_bottom))
732 /* Will we bump into the RMO? If so, check that we didn't overlap
733 * existing allocations there; if we did, we are dead, as we must
734 * be the first allocation in town!
736 if (addr < RELOC(rmo_top)) {
737 /* Good, we are first */
738 if (RELOC(alloc_top) == RELOC(rmo_top))
739 RELOC(alloc_top) = RELOC(rmo_top) = addr;
743 RELOC(alloc_top_high) = addr;
747 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
748 for (; base > RELOC(alloc_bottom);
749 base = _ALIGN_DOWN(base - 0x100000, align)) {
750 prom_debug(" trying: 0x%x\n\r", base);
751 addr = (unsigned long)prom_claim(base, size, 0);
752 if (addr != PROM_ERROR && addr != 0)
758 RELOC(alloc_top) = addr;
761 prom_debug(" -> %x\n", addr);
762 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
763 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
764 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
765 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
766 prom_debug(" ram_top : %x\n", RELOC(ram_top));
774 static unsigned long __init prom_next_cell(int s, cell_t **cellp)
779 /* Ignore more than 2 cells */
780 while (s > sizeof(unsigned long) / 4) {
796 * Very dumb function for adding to the memory reserve list, but
797 * we don't need anything smarter at this point
799 * XXX Eventually check for collisions. They should NEVER happen.
800 * If problems seem to show up, it would be a good start to track them down.
803 static void reserve_mem(unsigned long base, unsigned long size)
805 unsigned long top = base + size;
806 unsigned long cnt = RELOC(mem_reserve_cnt);
811 /* We need to always keep one empty entry so that we
812 * have our terminator with "size" set to 0 since we are
813 * dumb and just copy this entire array to the boot params
815 base = _ALIGN_DOWN(base, PAGE_SIZE);
816 top = _ALIGN_UP(top, PAGE_SIZE);
819 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
820 prom_panic("Memory reserve map exhausted !\n");
821 RELOC(mem_reserve_map)[cnt].base = base;
822 RELOC(mem_reserve_map)[cnt].size = size;
823 RELOC(mem_reserve_cnt) = cnt + 1;
827 * Initialize the memory allocation mechanism, parse "memory" nodes and
828 * obtain that way the top of memory and RMO to set up our local allocator
830 static void __init prom_init_mem(void)
833 char *path, type[64];
836 struct prom_t *_prom = &RELOC(prom);
840 * We iterate the memory nodes to find
841 * 1) top of RMO (first node), and 2) top of memory
845 prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
847 prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
848 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
849 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
851 prom_debug("scanning memory:\n");
852 path = RELOC(prom_scratch);
854 for (node = 0; prom_next_node(&node); ) {
856 prom_getprop(node, "device_type", type, sizeof(type));
860 * CHRP Longtrail machines have no device_type
861 * on the memory node, so check the name instead...
863 prom_getprop(node, "name", type, sizeof(type));
865 if (strcmp(type, RELOC("memory")))
868 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
869 if (plen > sizeof(regbuf)) {
870 prom_printf("memory node too large for buffer !\n");
871 plen = sizeof(regbuf);
874 endp = p + (plen / sizeof(cell_t));
877 memset(path, 0, PROM_SCRATCH_SIZE);
878 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
879 prom_debug(" node %s :\n", path);
880 #endif /* DEBUG_PROM */
882 while ((endp - p) >= (rac + rsc)) {
883 unsigned long base, size;
885 base = prom_next_cell(rac, &p);
886 size = prom_next_cell(rsc, &p);
890 prom_debug(" %x %x\n", base, size);
892 RELOC(rmo_top) = size;
893 if ((base + size) > RELOC(ram_top))
894 RELOC(ram_top) = base + size;
898 RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
900 /* Check if we have an initrd after the kernel; if we do, move our bottom allocation point to just after it. */
903 if (RELOC(prom_initrd_start)) {
904 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
905 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
909 * If prom_memory_limit is set we reduce the upper limits *except* for
910 * alloc_top_high. This must be the real top of RAM so we can put TCE tables up there.
914 RELOC(alloc_top_high) = RELOC(ram_top);
916 if (RELOC(prom_memory_limit)) {
917 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
918 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
919 RELOC(prom_memory_limit));
920 RELOC(prom_memory_limit) = 0;
921 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
922 prom_printf("Ignoring mem=%x >= ram_top.\n",
923 RELOC(prom_memory_limit));
924 RELOC(prom_memory_limit) = 0;
926 RELOC(ram_top) = RELOC(prom_memory_limit);
927 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
932 * Setup our top alloc point, that is top of RMO or top of
933 * segment 0 when running non-LPAR.
934 * Some RS64 machines have buggy firmware where claims up at
935 * 1GB fail. Cap at 768MB as a workaround.
936 * Since 768MB is plenty of room, and we need to cap to something
937 * reasonable on 32-bit, cap at 768MB on all machines.
940 RELOC(rmo_top) = RELOC(ram_top);
941 RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
942 RELOC(alloc_top) = RELOC(rmo_top);
944 prom_printf("memory layout at init:\n");
945 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
946 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
947 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
948 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
949 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
950 prom_printf(" ram_top : %x\n", RELOC(ram_top));
955 * Allocate room for and instantiate RTAS
957 static void __init prom_instantiate_rtas(void)
964 prom_debug("prom_instantiate_rtas: start...\n");
966 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
967 prom_debug("rtas_node: %x\n", rtas_node);
968 if (!PHANDLE_VALID(rtas_node))
971 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
975 base = alloc_down(size, PAGE_SIZE, 0);
977 prom_printf("RTAS allocation failed !\n");
981 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
982 if (!IHANDLE_VALID(rtas_inst)) {
983 prom_printf("opening rtas package failed");
987 prom_printf("instantiating rtas at 0x%x ...", base);
989 if (call_prom_ret("call-method", 3, 2, &entry,
990 ADDR("instantiate-rtas"),
991 rtas_inst, base) == PROM_ERROR
993 prom_printf(" failed\n");
996 prom_printf(" done\n");
998 reserve_mem(base, size);
1000 prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
1001 prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
1003 prom_debug("rtas base = 0x%x\n", base);
1004 prom_debug("rtas entry = 0x%x\n", entry);
1005 prom_debug("rtas size = 0x%x\n", (long)size);
1007 prom_debug("prom_instantiate_rtas: end...\n");
1012 * Allocate room for and initialize TCE tables
1014 static void __init prom_initialize_tce_table(void)
1018 char compatible[64], type[64], model[64];
1019 char *path = RELOC(prom_scratch);
1021 u32 minalign, minsize;
1022 u64 tce_entry, *tce_entryp;
1023 u64 local_alloc_top, local_alloc_bottom;
1026 if (RELOC(ppc64_iommu_off))
1029 prom_debug("starting prom_initialize_tce_table\n");
1031 /* Cache current top of allocs so we reserve a single block */
1032 local_alloc_top = RELOC(alloc_top_high);
1033 local_alloc_bottom = local_alloc_top;
1035 /* Search all nodes looking for PHBs. */
1036 for (node = 0; prom_next_node(&node); ) {
1040 prom_getprop(node, "compatible",
1041 compatible, sizeof(compatible));
1042 prom_getprop(node, "device_type", type, sizeof(type));
1043 prom_getprop(node, "model", model, sizeof(model));
1045 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1048 /* Keep the old logic intact to avoid regressions. */
1049 if (compatible[0] != 0) {
1050 if ((strstr(compatible, RELOC("python")) == NULL) &&
1051 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1052 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1054 } else if (model[0] != 0) {
1055 if ((strstr(model, RELOC("ython")) == NULL) &&
1056 (strstr(model, RELOC("peedwagon")) == NULL) &&
1057 (strstr(model, RELOC("innipeg")) == NULL))
1061 if (prom_getprop(node, "tce-table-minalign", &minalign,
1062 sizeof(minalign)) == PROM_ERROR)
1064 if (prom_getprop(node, "tce-table-minsize", &minsize,
1065 sizeof(minsize)) == PROM_ERROR)
1066 minsize = 4UL << 20;
1069 * Even though we read what OF wants, we just set the table
1070 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1071 * By doing this, we avoid the pitfalls of trying to DMA to
1072 * MMIO space and the DMA alias hole.
1074 * On POWER4, firmware sets the TCE region by assuming
1075 * each TCE table is 8MB. Using this memory for anything
1076 * else will impact performance, so we always allocate 8MB.
1079 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1080 minsize = 8UL << 20;
1082 minsize = 4UL << 20;
1084 /* Align to the greater of the align or size */
1085 align = max(minalign, minsize);
1086 base = alloc_down(minsize, align, 1);
1088 prom_panic("ERROR, cannot find space for TCE table.\n");
1089 if (base < local_alloc_bottom)
1090 local_alloc_bottom = base;
1092 /* Save away the TCE table attributes for later use. */
1093 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1094 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1096 /* It seems OF doesn't null-terminate the path :-( */
1097 memset(path, 0, PROM_SCRATCH_SIZE);	/* path points into prom_scratch[] */
1098 /* Call OF to setup the TCE hardware */
1099 if (call_prom("package-to-path", 3, 1, node,
1100 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1101 prom_printf("package-to-path failed\n");
1104 prom_debug("TCE table: %s\n", path);
1105 prom_debug("\tnode = 0x%x\n", node);
1106 prom_debug("\tbase = 0x%x\n", base);
1107 prom_debug("\tsize = 0x%x\n", minsize);
1109 /* Initialize the table to have a one-to-one mapping
1110 * over the allocated size.
1112 tce_entryp = (unsigned long *)base;
1113 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1114 tce_entry = (i << PAGE_SHIFT);
1116 *tce_entryp = tce_entry;
1119 prom_printf("opening PHB %s", path);
1120 phb_node = call_prom("open", 1, 1, path);
1122 prom_printf("... failed\n");
1124 prom_printf("... done\n");
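/* The 64-bit table address is passed as two 32-bit cells below,
 * since call_prom() arguments are truncated to 32 bits (see the
 * ADDR() comment near the top of this file).
 */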
1126 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1127 phb_node, -1, minsize,
1128 (u32) base, (u32) (base >> 32));
1129 call_prom("close", 1, 0, phb_node);
1132 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1134 if (RELOC(prom_memory_limit)) {
1136 * We align the start to a 16MB boundary so we can map
1137 * the TCE area using large pages if possible.
1138 * The end should be the top of RAM so no need to align it.
1140 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1142 RELOC(prom_tce_alloc_end) = local_alloc_top;
1145 /* Flag the first invalid entry */
1146 prom_debug("ending prom_initialize_tce_table\n");
1151 * With CHRP SMP we need to use the OF to start the other processors.
1152 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1153 * so we have to put the processors into a holding pattern controlled
1154 * by the kernel (not OF) before we destroy the OF.
1156 * This uses a chunk of low memory, puts some holding pattern
1157 * code there and sends the other processors off to there until
1158 * smp_boot_cpus tells them to do something. The holding pattern
1159 * checks that address until its cpu # shows up there; when it does, that
1160 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1161 * of setting those values.
1163 * We also use physical address 0x4 here to tell when a cpu
1164 * is in its holding pattern code.
1168 extern void __secondary_hold(void);
1169 extern unsigned long __secondary_hold_spinloop;
1170 extern unsigned long __secondary_hold_acknowledge;
1173 * We want to reference the copy of __secondary_hold_* in the
1174 * 0 - 0x100 address range
1176 #define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1178 static void __init prom_hold_cpus(void)
1185 unsigned int interrupt_server[MAX_CPU_THREADS];
1186 unsigned int cpu_threads, hw_cpu_num;
1188 struct prom_t *_prom = &RELOC(prom);
1189 unsigned long *spinloop
1190 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1191 unsigned long *acknowledge
1192 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1194 /* __secondary_hold is actually a descriptor, not the text address */
1195 unsigned long secondary_hold
1196 = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
1198 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1201 prom_debug("prom_hold_cpus: start...\n");
1202 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1203 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1204 prom_debug(" 1) acknowledge = 0x%x\n",
1205 (unsigned long)acknowledge);
1206 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1207 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1209 /* Set the common spinloop variable, so all of the secondary cpus
1210 * will block when they are awakened from their OF spinloop.
1211 * This must occur for both SMP and non-SMP kernels, since OF will
1212 * be trashed when we move the kernel.
1217 for (i = 0; i < NR_CPUS; i++)
1218 RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
1221 for (node = 0; prom_next_node(&node); ) {
1223 prom_getprop(node, "device_type", type, sizeof(type));
1224 if (strcmp(type, RELOC("cpu")) != 0)
1227 /* Skip non-configured cpus. */
1228 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1229 if (strcmp(type, RELOC("okay")) != 0)
1233 prom_getprop(node, "reg", &reg, sizeof(reg));
1235 prom_debug("\ncpuid = 0x%x\n", cpuid);
1236 prom_debug("cpu hw idx = 0x%x\n", reg);
1238 /* Init the acknowledge var which will be reset by
1239 * the secondary cpu when it awakens from its OF spinloop. */
1242 *acknowledge = (unsigned long)-1;
1244 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
1246 sizeof(interrupt_server));
1248 /* no property. old hardware has no SMT */
1250 interrupt_server[0] = reg; /* fake it with phys id */
1252 /* We have a threaded processor */
1253 cpu_threads = propsize / sizeof(u32);
1254 if (cpu_threads > MAX_CPU_THREADS) {
1255 prom_printf("SMT: too many threads!\n"
1256 "SMT: found %x, max is %x\n",
1257 cpu_threads, MAX_CPU_THREADS);
1258 cpu_threads = 1; /* ToDo: panic? */
1262 hw_cpu_num = interrupt_server[0];
1263 if (hw_cpu_num != _prom->cpu) {
1264 /* Primary Thread of non-boot cpu */
1265 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
1266 call_prom("start-cpu", 3, 0, node,
1267 secondary_hold, reg);
1269 for (i = 0; (i < 100000000) &&
1270 (*acknowledge == ((unsigned long)-1)); i++ )
1273 if (*acknowledge == reg)
1274 prom_printf("done\n");
1276 prom_printf("failed: %x\n", *acknowledge);
1280 prom_printf("%x : boot cpu %x\n", cpuid, reg);
1281 #endif /* CONFIG_SMP */
1283 /* Reserve cpu #s for secondary threads. They start later. */
1284 cpuid += cpu_threads;
1287 /* Only enable HMT on processors that provide support. */
1288 if (__is_processor(PV_PULSAR) ||
1289 __is_processor(PV_ICESTAR) ||
1290 __is_processor(PV_SSTAR)) {
1291 prom_printf(" starting secondary threads\n");
1293 for (i = 0; i < NR_CPUS; i += 2) {
1298 unsigned long pir = mfspr(SPRN_PIR);
1299 if (__is_processor(PV_PULSAR)) {
1300 RELOC(hmt_thread_data)[i].pir =
1303 RELOC(hmt_thread_data)[i].pir =
1309 prom_printf("Processor is not HMT capable\n");
1313 if (cpuid > NR_CPUS)
1314 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1315 ") exceeded: ignoring extras\n");
1317 prom_debug("prom_hold_cpus: end...\n");
1321 static void __init prom_init_client_services(unsigned long pp)
1323 struct prom_t *_prom = &RELOC(prom);
1325 /* Get a handle to the prom entry point before anything else */
1326 RELOC(prom_entry) = pp;
1328 /* get a handle for the stdout device */
1329 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1330 if (!PHANDLE_VALID(_prom->chosen))
1331 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1333 /* get device tree root */
1334 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1335 if (!PHANDLE_VALID(_prom->root))
1336 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1343 * For really old powermacs, we need to map things we claim.
1344 * For that, we need the ihandle of the mmu.
1346 static void __init prom_find_mmu(void)
1348 struct prom_t *_prom = &RELOC(prom);
1352 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1353 if (!PHANDLE_VALID(oprom))
1355 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1357 version[sizeof(version) - 1] = 0;
1358 prom_printf("OF version is '%s'\n", version);
1359 /* XXX might need to add other versions here */
1360 if (strcmp(version, "Open Firmware, 1.0.5") != 0)
1362 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1363 sizeof(_prom->mmumap));
1366 #define prom_find_mmu()
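/*
 * Locate the firmware's stdout device: remember its ihandle, record
 * its full path and package in /chosen for the kernel, and mark it as
 * the boot display if it is one.
 */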
1369 static void __init prom_init_stdout(void)
1371 struct prom_t *_prom = &RELOC(prom);
1372 char *path = RELOC(of_stdout_device);
1376 if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
1377 prom_panic("cannot find stdout");
1379 _prom->stdout = val;
1381 /* Get the full OF pathname of the stdout device */
1382 memset(path, 0, 256);
1383 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1384 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1385 prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
1386 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1387 prom_setprop(_prom->chosen, "linux,stdout-path",
1388 RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
1390 /* If it's a display, note it */
1391 memset(type, 0, sizeof(type));
1392 prom_getprop(val, "device_type", type, sizeof(type));
1393 if (strcmp(type, RELOC("display")) == 0)
1394 prom_setprop(val, "linux,boot-display", NULL, 0);
1397 static void __init prom_close_stdin(void)
1399 struct prom_t *_prom = &RELOC(prom);
1402 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1403 call_prom("close", 1, 0, val);
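/*
 * Work out which platform we are running on from the root node's
 * "compatible" property, falling back to pSeries (LPAR if hypertas
 * functions are advertised in /rtas, plain pSeries otherwise) or CHRP.
 */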
1406 static int __init prom_find_machine_type(void)
1408 struct prom_t *_prom = &RELOC(prom);
1413 len = prom_getprop(_prom->root, "compatible",
1414 compat, sizeof(compat)-1);
1418 char *p = &compat[i];
1422 if (strstr(p, RELOC("Power Macintosh")) ||
1423 strstr(p, RELOC("MacRISC")))
1424 return PLATFORM_POWERMAC;
1426 if (strstr(p, RELOC("Momentum,Maple")))
1427 return PLATFORM_MAPLE;
1433 /* Default to pSeries. We need to know if we are running LPAR */
1434 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1435 if (PHANDLE_VALID(rtas)) {
1436 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1437 if (x != PROM_ERROR) {
1438 prom_printf("Hypertas detected, assuming LPAR !\n");
1439 return PLATFORM_PSERIES_LPAR;
1442 return PLATFORM_PSERIES;
1444 return PLATFORM_CHRP;
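/* Program one entry of the display's color map via the OF "color!" method. */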
1448 static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1450 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1454 * If we have a display that we don't know how to drive,
1455 * we will want to try to execute OF's open method for it
1456 * later. However, OF will probably fall over if we do that
1457 * after we've taken over the MMU.
1458 * So we check whether we will need to open the display,
1459 * and if so, open it now.
1461 static void __init prom_check_displays(void)
1463 char type[16], *path;
1468 static unsigned char default_colors[] = {
1486 const unsigned char *clut;
1488 prom_printf("Looking for displays\n");
1489 for (node = 0; prom_next_node(&node); ) {
1490 memset(type, 0, sizeof(type));
1491 prom_getprop(node, "device_type", type, sizeof(type));
1492 if (strcmp(type, RELOC("display")) != 0)
1495 /* It seems OF doesn't null-terminate the path :-( */
1496 path = RELOC(prom_scratch);
1497 memset(path, 0, PROM_SCRATCH_SIZE);
1500 * leave some room at the end of the path for appending extra arguments
1503 if (call_prom("package-to-path", 3, 1, node, path,
1504 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1506 prom_printf("found display : %s, opening ... ", path);
1508 ih = call_prom("open", 1, 1, path);
1510 prom_printf("failed\n");
1515 prom_printf("done\n");
1516 prom_setprop(node, "linux,opened", NULL, 0);
1518 /* Setup a usable color table when the appropriate
1519 * method is available. Should update this to use set-colors. */
1520 clut = RELOC(default_colors);
1521 for (i = 0; i < 32; i++, clut += 3)
1522 if (prom_set_color(ih, i, clut[0], clut[1],
1526 #ifdef CONFIG_LOGO_LINUX_CLUT224
1527 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1528 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1529 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1532 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
1537 /* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1538 static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1539 unsigned long needed, unsigned long align)
1543 *mem_start = _ALIGN(*mem_start, align);
1544 while ((*mem_start + needed) > *mem_end) {
1545 unsigned long room, chunk;
1547 prom_debug("Chunk exhausted, claiming more at %x...\n",
1548 RELOC(alloc_bottom));
1549 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1550 if (room > DEVTREE_CHUNK_SIZE)
1551 room = DEVTREE_CHUNK_SIZE;
1552 if (room < PAGE_SIZE)
1553 prom_panic("No memory for flatten_device_tree (no room)");
1554 chunk = alloc_up(room, 0);
1556 prom_panic("No memory for flatten_device_tree (claim failed)");
1557 *mem_end = RELOC(alloc_top);
1560 ret = (void *)*mem_start;
1561 *mem_start += needed;
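/* Append a single 32-bit cell to the flattened tree being built. */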
1566 #define dt_push_token(token, mem_start, mem_end) \
1567 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
1569 static unsigned long __init dt_find_string(char *str)
1573 s = os = (char *)RELOC(dt_string_start);
1575 while (s < (char *)RELOC(dt_string_end)) {
1576 if (strcmp(s, str) == 0)
1584 * The Open Firmware 1275 specification states property names must be 31
1585 * bytes or less; however, not all firmwares obey this. Make it 64 bytes to be safe.
1587 #define MAX_PROPERTY_NAME 64
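/*
 * First pass over the tree: gather every distinct property name into
 * the strings block (dt_string_start..dt_string_end), so that the
 * structure block can later refer to property names by offset.
 */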
1589 static void __init scan_dt_build_strings(phandle node,
1590 unsigned long *mem_start,
1591 unsigned long *mem_end)
1593 char *prev_name, *namep, *sstart;
1597 sstart = (char *)RELOC(dt_string_start);
1599 /* get and store all property names */
1600 prev_name = RELOC("");
1602 /* 64 is max len of name including nul. */
1603 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1604 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1605 /* No more properties: unwind alloc */
1606 *mem_start = (unsigned long)namep;
1611 if (strcmp(namep, RELOC("name")) == 0) {
1612 *mem_start = (unsigned long)namep;
1613 prev_name = RELOC("name");
1616 /* get/create string entry */
1617 soff = dt_find_string(namep);
1619 *mem_start = (unsigned long)namep;
1620 namep = sstart + soff;
1622 /* Trim off some if we can */
1623 *mem_start = (unsigned long)namep + strlen(namep) + 1;
1624 RELOC(dt_string_end) = *mem_start;
1629 /* do all our children */
1630 child = call_prom("child", 1, 1, node);
1631 while (child != 0) {
1632 scan_dt_build_strings(child, mem_start, mem_end);
1633 child = call_prom("peer", 1, 1, child);
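/*
 * Second pass: for each node, emit OF_DT_BEGIN_NODE and the unit name,
 * then one OF_DT_PROP record per property (length, offset of the name
 * in the strings block, then the value), add a linux,phandle property,
 * recurse into the children, and finish with OF_DT_END_NODE.
 */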
1637 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1638 unsigned long *mem_end)
1641 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1643 unsigned char *valp;
1644 static char pname[MAX_PROPERTY_NAME];
1647 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1649 /* get the node's full name */
1650 namep = (char *)*mem_start;
1651 room = *mem_end - *mem_start;
1654 l = call_prom("package-to-path", 3, 1, node, namep, room);
1656 /* Didn't fit? Get more room. */
1658 if (l >= *mem_end - *mem_start)
1659 namep = make_room(mem_start, mem_end, l+1, 1);
1660 call_prom("package-to-path", 3, 1, node, namep, l);
1664 /* Fixup an Apple bug where they have bogus \0 chars in the
1665 * middle of the path in some properties, and extract
1666 * the unit name (everything after the last '/').
1668 for (lp = p = namep, ep = namep + l; p < ep; p++) {
1675 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
1678 /* get it again for debugging */
1679 path = RELOC(prom_scratch);
1680 memset(path, 0, PROM_SCRATCH_SIZE);
1681 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1683 /* get and store all properties */
1684 prev_name = RELOC("");
1685 sstart = (char *)RELOC(dt_string_start);
1687 if (call_prom("nextprop", 3, 1, node, prev_name,
1692 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1693 prev_name = RELOC("name");
1697 /* find string offset */
1698 soff = dt_find_string(RELOC(pname));
1700 prom_printf("WARNING: Can't find string index for"
1701 " <%s>, node %s\n", RELOC(pname), path);
1704 prev_name = sstart + soff;
1707 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1710 if (l == PROM_ERROR)
1712 if (l > MAX_PROPERTY_LENGTH) {
1713 prom_printf("WARNING: ignoring large property ");
1714 /* It seems OF doesn't null-terminate the path :-( */
1715 prom_printf("[%s] ", path);
1716 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1720 /* push property head */
1721 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1722 dt_push_token(l, mem_start, mem_end);
1723 dt_push_token(soff, mem_start, mem_end);
1725 /* push property content */
1726 valp = make_room(mem_start, mem_end, l, 4);
1727 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1728 *mem_start = _ALIGN(*mem_start, 4);
1731 /* Add a "linux,phandle" property. */
1732 soff = dt_find_string(RELOC("linux,phandle"));
1734 prom_printf("WARNING: Can't find string index for"
1735 " <linux-phandle> node %s\n", path);
1737 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1738 dt_push_token(4, mem_start, mem_end);
1739 dt_push_token(soff, mem_start, mem_end);
1740 valp = make_room(mem_start, mem_end, 4, 4);
1741 *(u32 *)valp = node;
1744 /* do all our children */
1745 child = call_prom("child", 1, 1, node);
1746 while (child != 0) {
1747 scan_dt_build_struct(child, mem_start, mem_end);
1748 child = call_prom("peer", 1, 1, child);
1751 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
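/*
 * Create the flattened device-tree blob in memory claimed from OF:
 * a boot_param_header, the memory reserve map, the strings block and
 * the structure block, with all offsets recorded in the header.
 */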
1754 static void __init flatten_device_tree(void)
1757 unsigned long mem_start, mem_end, room;
1758 struct boot_param_header *hdr;
1759 struct prom_t *_prom = &RELOC(prom);
1764 * Check how much room we have between alloc top & bottom (+/- a
1765 * few pages), crop to DEVTREE_CHUNK_SIZE, as this is our "chunk" size
1767 room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
1768 if (room > DEVTREE_CHUNK_SIZE)
1769 room = DEVTREE_CHUNK_SIZE;
1770 prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
1772 /* Now try to claim that */
1773 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
1775 prom_panic("Can't allocate initial device-tree chunk\n");
1776 mem_end = RELOC(alloc_top);
1778 /* Get root of tree */
1779 root = call_prom("peer", 1, 1, (phandle)0);
1780 if (root == (phandle)0)
1781 prom_panic ("couldn't get device tree root\n");
1783 /* Build header and make room for mem rsv map */
1784 mem_start = _ALIGN(mem_start, 4);
1785 hdr = make_room(&mem_start, &mem_end,
1786 sizeof(struct boot_param_header), 4);
1787 RELOC(dt_header_start) = (unsigned long)hdr;
1788 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1790 /* Start of strings */
1791 mem_start = PAGE_ALIGN(mem_start);
1792 RELOC(dt_string_start) = mem_start;
1793 mem_start += 4; /* hole */
1795 /* Add "linux,phandle" in there, we'll need it */
1796 namep = make_room(&mem_start, &mem_end, 16, 1);
1797 strcpy(namep, RELOC("linux,phandle"));
1798 mem_start = (unsigned long)namep + strlen(namep) + 1;
1800 /* Build string array */
1801 prom_printf("Building dt strings...\n");
1802 scan_dt_build_strings(root, &mem_start, &mem_end);
1803 RELOC(dt_string_end) = mem_start;
1805 /* Build structure */
1806 mem_start = PAGE_ALIGN(mem_start);
1807 RELOC(dt_struct_start) = mem_start;
1808 prom_printf("Building dt structure...\n");
1809 scan_dt_build_struct(root, &mem_start, &mem_end);
1810 dt_push_token(OF_DT_END, &mem_start, &mem_end);
1811 RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
1814 hdr->boot_cpuid_phys = _prom->cpu;
1815 hdr->magic = OF_DT_HEADER;
1816 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1817 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1818 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1819 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1820 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1821 hdr->version = OF_DT_VERSION;
1822 /* Version 16 is not backward compatible */
1823 hdr->last_comp_version = 0x10;
1825 /* Reserve the whole thing and copy the reserve map in, we
1826 * also bump mem_reserve_cnt to cause further reservations to
1827 * fail since it's too late.
1829 reserve_mem(RELOC(dt_header_start), hdr->totalsize);
1830 memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
1835 prom_printf("reserved memory map:\n");
1836 for (i = 0; i < RELOC(mem_reserve_cnt); i++)
1837 prom_printf(" %x - %x\n",
1838 RELOC(mem_reserve_map)[i].base,
1839 RELOC(mem_reserve_map)[i].size);
1842 RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
1844 prom_printf("Device tree strings 0x%x -> 0x%x\n",
1845 RELOC(dt_string_start), RELOC(dt_string_end));
1846 prom_printf("Device tree struct 0x%x -> 0x%x\n",
1847 RELOC(dt_struct_start), RELOC(dt_struct_end));
1852 static void __init fixup_device_tree(void)
1854 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
1855 phandle u3, i2c, mpic;
1860 /* Some G5s have a missing interrupt definition, fix it up here */
1861 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
1862 if (!PHANDLE_VALID(u3))
1864 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
1865 if (!PHANDLE_VALID(i2c))
1867 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
1868 if (!PHANDLE_VALID(mpic))
1871 /* check if proper rev of u3 */
1872 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
1875 if (u3_rev != 0x35 && u3_rev != 0x37)
1877 /* does it need fixup ? */
1878 if (prom_getproplen(i2c, "interrupts") > 0)
1881 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1883 /* interrupt on this revision of u3 is number 0 and level */
1886 prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
1888 prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
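/*
 * Figure out which CPU the firmware is running us on: follow /chosen's
 * "cpu" instance to its package and record that node's "reg" property
 * (the hardware index) in _prom->cpu.
 */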
1893 static void __init prom_find_boot_cpu(void)
1895 struct prom_t *_prom = &RELOC(prom);
1901 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1904 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1906 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1907 _prom->cpu = getprop_rval;
1909 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
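/*
 * If the boot loader passed an initrd in r3/r4, record its physical
 * start and end in /chosen for the kernel and reserve the memory so
 * later allocations can't clobber it.
 */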
1912 static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1914 #ifdef CONFIG_BLK_DEV_INITRD
1915 struct prom_t *_prom = &RELOC(prom);
1917 if (r3 && r4 && r4 != 0xdeadbeef) {
1920 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1921 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1923 val = RELOC(prom_initrd_start);
1924 prom_setprop(_prom->chosen, "linux,initrd-start", &val,
1926 val = RELOC(prom_initrd_end);
1927 prom_setprop(_prom->chosen, "linux,initrd-end", &val,
1930 reserve_mem(RELOC(prom_initrd_start),
1931 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
1933 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
1934 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
1936 #endif /* CONFIG_BLK_DEV_INITRD */
1940 * We enter here early on, when the Open Firmware prom is still
1941 * handling exceptions and the MMU hash table for us.
1944 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
1946 unsigned long r6, unsigned long r7)
1948 struct prom_t *_prom;
1951 unsigned long offset = reloc_offset();
1957 _prom = &RELOC(prom);
1960 * First zero the BSS
1962 memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
1965 * Init interface to Open Firmware, get some node references,
1968 prom_init_client_services(pp);
1971 * Init prom stdout device
1976 * See if this OF is old enough that we need to do explicit maps
1981 * Check for an initrd
1983 prom_check_initrd(r3, r4);
1986 * Get default machine type. At this point, we do not differentiate
1987 * between pSeries SMP and pSeries LPAR
1989 RELOC(of_platform) = prom_find_machine_type();
1990 getprop_rval = RELOC(of_platform);
1991 prom_setprop(_prom->chosen, "linux,platform",
1992 &getprop_rval, sizeof(getprop_rval));
1994 #ifdef CONFIG_PPC_PSERIES
1996 * On pSeries, inform the firmware about our capabilities
1998 if (RELOC(of_platform) & PLATFORM_PSERIES)
1999 prom_send_capabilities();
2003 * Copy the CPU hold code
2005 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2009 * Do early parsing of command line
2011 early_cmdline_parse();
2014 * Initialize memory management within prom_init
2019 * Determine which cpu is actually running right _now_
2021 prom_find_boot_cpu();
2024 * Initialize display devices
2026 prom_check_displays();
2030 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2031 * that uses the allocator, we need to make sure we get the top of memory
2032 * available for us here...
2034 if (RELOC(of_platform) == PLATFORM_PSERIES)
2035 prom_initialize_tce_table();
2039 * On non-powermacs, try to instantiate RTAS and put all CPUs
2040 * in spin-loops. PowerMacs don't have a working RTAS and use
2041 * a different way to spin CPUs
2043 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
2044 prom_instantiate_rtas();
2049 * Fill in some info for use by the kernel later on
2051 if (RELOC(prom_memory_limit))
2052 prom_setprop(_prom->chosen, "linux,memory-limit",
2053 &RELOC(prom_memory_limit),
2054 sizeof(prom_memory_limit));
2056 if (RELOC(ppc64_iommu_off))
2057 prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
2059 if (RELOC(iommu_force_on))
2060 prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
2062 if (RELOC(prom_tce_alloc_start)) {
2063 prom_setprop(_prom->chosen, "linux,tce-alloc-start",
2064 &RELOC(prom_tce_alloc_start),
2065 sizeof(prom_tce_alloc_start));
2066 prom_setprop(_prom->chosen, "linux,tce-alloc-end",
2067 &RELOC(prom_tce_alloc_end),
2068 sizeof(prom_tce_alloc_end));
2073 * Fixup any known bugs in the device-tree
2075 fixup_device_tree();
2078 * Now finally create the flattened device-tree
2080 prom_printf("copying OF device tree ...\n");
2081 flatten_device_tree();
2083 /* in case stdin is USB and still active on IBM machines... */
2087 * Call OF "quiesce" method to shut down pending DMAs from devices, etc.
2090 prom_printf("Calling quiesce ...\n");
2091 call_prom("quiesce", 0, 0);
2094 * And finally, call the kernel passing it the flattened device
2095 * tree and NULL as r5, thus triggering the new entry point which
2096 * is common to us and kexec
2098 hdr = RELOC(dt_header_start);
2099 prom_printf("returning from prom_init\n");
2100 prom_debug("->dt_header_start=0x%x\n", hdr);
2103 reloc_got2(-offset);
2106 __start(hdr, KERNELBASE + offset, 0);