2 * Procedures for creating, accessing and interpreting the device tree.
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
19 #include <linux/config.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/init.h>
23 #include <linux/threads.h>
24 #include <linux/spinlock.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/stringify.h>
28 #include <linux/delay.h>
29 #include <linux/initrd.h>
30 #include <linux/bitops.h>
31 #include <linux/module.h>
32 #include <linux/kexec.h>
38 #include <asm/processor.h>
41 #include <asm/kdump.h>
43 #include <asm/system.h>
45 #include <asm/pgtable.h>
47 #include <asm/iommu.h>
48 #include <asm/btext.h>
49 #include <asm/sections.h>
50 #include <asm/machdep.h>
51 #include <asm/pSeries_reconfig.h>
52 #include <asm/pci-bridge.h>
53 #include <asm/kexec.h>
56 #define DBG(fmt...) printk(KERN_ERR fmt)
62 static int __initdata dt_root_addr_cells;
63 static int __initdata dt_root_size_cells;
66 int __initdata iommu_is_off;
67 int __initdata iommu_force_on;
68 unsigned long tce_alloc_start, tce_alloc_end;
74 static struct boot_param_header *initial_boot_params __initdata;
76 struct boot_param_header *initial_boot_params;
79 static struct device_node *allnodes = NULL;
81 /* use when traversing tree through the allnext, child, sibling,
82 * or parent members of struct device_node.
84 static DEFINE_RWLOCK(devtree_lock);
86 /* export that to outside world */
87 struct device_node *of_chosen;
89 struct device_node *dflt_interrupt_controller;
90 int num_interrupt_controllers;
93 * Wrapper for allocating memory for various data that needs to be
94 * attached to device nodes as they are processed at boot or when
95 * added to the device tree later (e.g. DLPAR). At boot there is
96 * already a region reserved so we just increment *mem_start by size;
97 * otherwise we call kmalloc.
99 static void * prom_alloc(unsigned long size, unsigned long *mem_start)
104 return kmalloc(size, GFP_KERNEL);
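/*
 * Usage sketch (illustrative, mirroring the callers in this file): boot-time
 * code passes the current allocation cursor, while later hotplug-style
 * callers are expected to pass NULL so that the kmalloc() path above is
 * taken:
 *
 *	np->intrs = prom_alloc(n * sizeof(*np->intrs), mem_start);  // at boot
 *	np->intrs = prom_alloc(n * sizeof(*np->intrs), NULL);       // later
 */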
112 * Find the device_node with a given phandle.
114 static struct device_node * find_phandle(phandle ph)
116 struct device_node *np;
118 for (np = allnodes; np != 0; np = np->allnext)
119 if (np->linux_phandle == ph)
125 * Find the interrupt parent of a node.
127 static struct device_node * __devinit intr_parent(struct device_node *p)
131 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
134 p = find_phandle(*parp);
138 * On a powermac booted with BootX, we don't get to know the
139 * phandles for any nodes, so find_phandle will return NULL.
140 * Fortunately these machines only have one interrupt controller
141 * so there isn't in fact any ambiguity. -- paulus
143 if (num_interrupt_controllers == 1)
144 p = dflt_interrupt_controller;
149 * Find out the size of each entry of the interrupts property
152 int __devinit prom_n_intr_cells(struct device_node *np)
154 struct device_node *p;
157 for (p = np; (p = intr_parent(p)) != NULL; ) {
158 icp = (unsigned int *)
159 get_property(p, "#interrupt-cells", NULL);
162 if (get_property(p, "interrupt-controller", NULL) != NULL
163 || get_property(p, "interrupt-map", NULL) != NULL) {
164 printk("oops, node %s doesn't have #interrupt-cells\n",
170 printk("prom_n_intr_cells failed for %s\n", np->full_name);
176 * Map an interrupt from a device up to the platform interrupt
179 static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
180 struct device_node *np, unsigned int *ints,
183 struct device_node *p, *ipar;
184 unsigned int *imap, *imask, *ip;
185 int i, imaplen, match;
186 int newintrc = 0, newaddrc = 0;
190 reg = (unsigned int *) get_property(np, "reg", NULL);
191 naddrc = prom_n_addr_cells(np);
194 if (get_property(p, "interrupt-controller", NULL) != NULL)
195 /* this node is an interrupt controller, stop here */
197 imap = (unsigned int *)
198 get_property(p, "interrupt-map", &imaplen);
203 imask = (unsigned int *)
204 get_property(p, "interrupt-map-mask", NULL);
206 printk("oops, %s has interrupt-map but no mask\n",
210 imaplen /= sizeof(unsigned int);
213 while (imaplen > 0 && !match) {
214 /* check the child-interrupt field */
216 for (i = 0; i < naddrc && match; ++i)
217 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
218 for (; i < naddrc + nintrc && match; ++i)
219 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
220 imap += naddrc + nintrc;
221 imaplen -= naddrc + nintrc;
222 /* grab the interrupt parent */
223 ipar = find_phandle((phandle) *imap++);
225 if (ipar == NULL && num_interrupt_controllers == 1)
226 /* cope with BootX not giving us phandles */
227 ipar = dflt_interrupt_controller;
229 printk("oops, no int parent %x in map of %s\n",
230 imap[-1], p->full_name);
233 /* find the parent's # addr and intr cells */
234 ip = (unsigned int *)
235 get_property(ipar, "#interrupt-cells", NULL);
237 printk("oops, no #interrupt-cells on %s\n",
242 ip = (unsigned int *)
243 get_property(ipar, "#address-cells", NULL);
244 newaddrc = (ip == NULL)? 0: *ip;
245 imap += newaddrc + newintrc;
246 imaplen -= newaddrc + newintrc;
249 printk("oops, error decoding int-map on %s, len=%d\n",
250 p->full_name, imaplen);
255 printk("oops, no match in %s int-map for %s\n",
256 p->full_name, np->full_name);
263 ints = imap - nintrc;
268 printk("hmmm, int tree for %s doesn't have ctrler\n",
278 static unsigned char map_isa_senses[4] = {
279 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
280 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
281 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
282 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
285 static unsigned char map_mpic_senses[4] = {
286 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
287 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
288 /* 2 seems to be used for the 8259 cascade... */
289 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
290 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
293 static int __devinit finish_node_interrupts(struct device_node *np,
294 unsigned long *mem_start,
298 int intlen, intrcells, intrcount;
300 unsigned int *irq, virq;
301 struct device_node *ic;
304 //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
305 #define TRACE(fmt...)
307 if (!strcmp(np->name, "smu-doorbell"))
310 TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
311 num_interrupt_controllers);
313 if (num_interrupt_controllers == 0) {
315 * Old machines just have a list of interrupt numbers
316 * and no interrupt-controller nodes.
318 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
320 /* XXX old interpret_pci_props looked in parent too */
321 /* XXX old interpret_macio_props looked for interrupts
322 before AAPL,interrupts */
324 ints = (unsigned int *) get_property(np, "interrupts",
329 np->n_intrs = intlen / sizeof(unsigned int);
330 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
337 for (i = 0; i < np->n_intrs; ++i) {
338 np->intrs[i].line = *ints++;
339 np->intrs[i].sense = IRQ_SENSE_LEVEL
340 | IRQ_POLARITY_NEGATIVE;
345 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
346 TRACE("ints=%p, intlen=%d\n", ints, intlen);
349 intrcells = prom_n_intr_cells(np);
350 intlen /= intrcells * sizeof(unsigned int);
351 TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
352 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
360 for (i = 0; i < intlen; ++i, ints += intrcells) {
361 n = map_interrupt(&irq, &ic, np, ints, intrcells);
362 TRACE("map, irq=%p, ic=%p, n=%d\n", irq, ic, n);
366 /* don't map IRQ numbers under a cascaded 8259 controller */
367 if (ic && device_is_compatible(ic, "chrp,iic")) {
368 np->intrs[intrcount].line = irq[0];
369 sense = (n > 1)? (irq[1] & 3): 3;
370 np->intrs[intrcount].sense = map_isa_senses[sense];
372 virq = virt_irq_create_mapping(irq[0]);
373 TRACE("virq=%d\n", virq);
375 if (virq == NO_IRQ) {
376 printk(KERN_CRIT "Could not allocate interrupt"
377 " number for %s\n", np->full_name);
381 np->intrs[intrcount].line = irq_offset_up(virq);
382 sense = (n > 1)? (irq[1] & 3): 1;
384 /* Apple uses bits in there in a different way, let's
385 * only keep the real sense bit on macs
387 if (machine_is(powermac))
389 np->intrs[intrcount].sense = map_mpic_senses[sense];
393 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
394 if (machine_is(powermac) && ic && ic->parent) {
395 char *name = get_property(ic->parent, "name", NULL);
396 if (name && !strcmp(name, "u3"))
397 np->intrs[intrcount].line += 128;
398 else if (!(name && (!strcmp(name, "mac-io") ||
399 !strcmp(name, "u4"))))
400 /* ignore other cascaded controllers, such as
404 #endif /* CONFIG_PPC64 */
406 printk("hmmm, got %d intr cells for %s:", n,
408 for (j = 0; j < n; ++j)
409 printk(" %d", irq[j]);
414 np->n_intrs = intrcount;
419 static int __devinit finish_node(struct device_node *np,
420 unsigned long *mem_start,
423 struct device_node *child;
426 rc = finish_node_interrupts(np, mem_start, measure_only);
430 for (child = np->child; child != NULL; child = child->sibling) {
431 rc = finish_node(child, mem_start, measure_only);
439 static void __init scan_interrupt_controllers(void)
441 struct device_node *np;
446 for (np = allnodes; np != NULL; np = np->allnext) {
447 ic = get_property(np, "interrupt-controller", &iclen);
448 name = get_property(np, "name", NULL);
449 /* checking iclen makes sure we don't get a false
450 match on /chosen.interrupt_controller */
452 && strcmp(name, "interrupt-controller") == 0)
453 || (ic != NULL && iclen == 0
454 && strcmp(name, "AppleKiwi"))) {
456 dflt_interrupt_controller = np;
460 num_interrupt_controllers = n;
464 * finish_device_tree is called once things are running normally
465 * (i.e. with text and data mapped to the address they were linked at).
466 * It traverses the device tree and fills in some of the additional
467 * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
468 * interrupt mapping is also initialized at this point.
470 void __init finish_device_tree(void)
472 unsigned long start, end, size = 0;
474 DBG(" -> finish_device_tree\n");
477 /* Initialize virtual IRQ map */
480 scan_interrupt_controllers();
483 * Finish device-tree (pre-parsing some properties etc...)
484 * We do this in 2 passes. One with "measure_only" set, which
485 * will only measure the amount of memory needed, then we can
486 * allocate that memory, and call finish_node again. However,
487 * we must be careful as most routines will fail nowadays when
488 * prom_alloc() returns 0, so we must make sure our first pass
489 * doesn't start at 0. We pre-initialize size to 16 for that
490 * reason and then remove those additional 16 bytes
493 finish_node(allnodes, &size, 1);
499 end = start = (unsigned long)__va(lmb_alloc(size, 128));
501 finish_node(allnodes, &end, 0);
502 BUG_ON(end != start + size);
504 DBG(" <- finish_device_tree\n");
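/*
 * Sketch of the measure/allocate pattern used above (simplified, not a
 * drop-in replacement; the 16-byte pre-bias only exists to keep the
 * first-pass cursor away from 0):
 *
 *	size = 16;
 *	finish_node(allnodes, &size, 1);	// pass 1: measure only
 *	size -= 16;
 *	end = start = (unsigned long)__va(lmb_alloc(size, 128));
 *	finish_node(allnodes, &end, 0);		// pass 2: fill in for real
 *	BUG_ON(end != start + size);
 */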
507 static inline char *find_flat_dt_string(u32 offset)
509 return ((char *)initial_boot_params) +
510 initial_boot_params->off_dt_strings + offset;
514 * This function is used to scan the flattened device-tree; it is
515 * used to extract the memory information at boot before we can
518 int __init of_scan_flat_dt(int (*it)(unsigned long node,
519 const char *uname, int depth,
523 unsigned long p = ((unsigned long)initial_boot_params) +
524 initial_boot_params->off_dt_struct;
529 u32 tag = *((u32 *)p);
533 if (tag == OF_DT_END_NODE) {
537 if (tag == OF_DT_NOP)
539 if (tag == OF_DT_END)
541 if (tag == OF_DT_PROP) {
542 u32 sz = *((u32 *)p);
544 if (initial_boot_params->version < 0x10)
545 p = _ALIGN(p, sz >= 8 ? 8 : 4);
550 if (tag != OF_DT_BEGIN_NODE) {
551 printk(KERN_WARNING "Invalid tag %x scanning flattened"
552 " device tree !\n", tag);
557 p = _ALIGN(p + strlen(pathp) + 1, 4);
558 if ((*pathp) == '/') {
560 for (lp = NULL, np = pathp; *np; np++)
566 rc = it(p, pathp, depth, data);
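/*
 * Typical usage, as seen in early_init_devtree() further down: the callback
 * is invoked once per node with its offset, unit name and depth, and its
 * return value can be used to stop the scan early, e.g.
 *
 *	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 */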
574 unsigned long __init of_get_flat_dt_root(void)
576 unsigned long p = ((unsigned long)initial_boot_params) +
577 initial_boot_params->off_dt_struct;
579 while(*((u32 *)p) == OF_DT_NOP)
581 BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
583 return _ALIGN(p + strlen((char *)p) + 1, 4);
587 * This function can be used within an of_scan_flat_dt callback to get
588 * access to properties
590 void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
593 unsigned long p = node;
596 u32 tag = *((u32 *)p);
601 if (tag == OF_DT_NOP)
603 if (tag != OF_DT_PROP)
607 noff = *((u32 *)(p + 4));
609 if (initial_boot_params->version < 0x10)
610 p = _ALIGN(p, sz >= 8 ? 8 : 4);
612 nstr = find_flat_dt_string(noff);
614 printk(KERN_WARNING "Can't find property index"
618 if (strcmp(name, nstr) == 0) {
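/*
 * Example from the scan callbacks below (the size pointer may be NULL when
 * the caller does not care about the property length):
 *
 *	unsigned long l;
 *	char *p = of_get_flat_dt_prop(node, "bootargs", &l);
 *	if (p != NULL && l > 0)
 *		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
 */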
628 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
631 unsigned long cplen, l;
633 cp = of_get_flat_dt_prop(node, "compatible", &cplen);
637 if (strncasecmp(cp, compat, strlen(compat)) == 0)
647 static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
652 *mem = _ALIGN(*mem, align);
659 static unsigned long __init unflatten_dt_node(unsigned long mem,
661 struct device_node *dad,
662 struct device_node ***allnextpp,
663 unsigned long fpsize)
665 struct device_node *np;
666 struct property *pp, **prev_pp = NULL;
669 unsigned int l, allocl;
673 tag = *((u32 *)(*p));
674 if (tag != OF_DT_BEGIN_NODE) {
675 printk("Weird tag at start of node: %x\n", tag);
680 l = allocl = strlen(pathp) + 1;
681 *p = _ALIGN(*p + l, 4);
683 /* version 0x10 has a more compact unit name here instead of the full
684 * path. We accumulate the full path size using "fpsize" and rebuild
685 * it later. We detect this because the first character of the name is
688 if ((*pathp) != '/') {
691 /* root node: special case. fpsize accounts for path
692 * plus terminating zero. root node only has '/', so
693 * fpsize should be 2, but we want to avoid first-level
694 * nodes having two '/', so we use fpsize 1 here
699 /* account for '/' and path size minus terminal 0
708 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
709 __alignof__(struct device_node));
711 memset(np, 0, sizeof(*np));
712 np->full_name = ((char*)np) + sizeof(struct device_node);
714 char *p = np->full_name;
715 /* rebuild full path for new format */
716 if (dad && dad->parent) {
717 strcpy(p, dad->full_name);
719 if ((strlen(p) + l + 1) != allocl) {
720 DBG("%s: p: %d, l: %d, a: %d\n",
721 pathp, (int)strlen(p), l, allocl);
729 memcpy(np->full_name, pathp, l);
730 prev_pp = &np->properties;
732 *allnextpp = &np->allnext;
735 /* we temporarily use the next field as `last_child'*/
739 dad->next->sibling = np;
742 kref_init(&np->kref);
748 tag = *((u32 *)(*p));
749 if (tag == OF_DT_NOP) {
753 if (tag != OF_DT_PROP)
757 noff = *((u32 *)((*p) + 4));
759 if (initial_boot_params->version < 0x10)
760 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
762 pname = find_flat_dt_string(noff);
764 printk("Can't find property name in list !\n");
767 if (strcmp(pname, "name") == 0)
769 l = strlen(pname) + 1;
770 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
771 __alignof__(struct property));
773 if (strcmp(pname, "linux,phandle") == 0) {
774 np->node = *((u32 *)*p);
775 if (np->linux_phandle == 0)
776 np->linux_phandle = np->node;
778 if (strcmp(pname, "ibm,phandle") == 0)
779 np->linux_phandle = *((u32 *)*p);
782 pp->value = (void *)*p;
786 *p = _ALIGN((*p) + sz, 4);
788 /* with version 0x10 we may not have the name property, recreate
789 * it here from the unit name if absent
792 char *p = pathp, *ps = pathp, *pa = NULL;
805 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
806 __alignof__(struct property));
810 pp->value = (unsigned char *)(pp + 1);
813 memcpy(pp->value, ps, sz - 1);
814 ((char *)pp->value)[sz - 1] = 0;
815 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
820 np->name = get_property(np, "name", NULL);
821 np->type = get_property(np, "device_type", NULL);
828 while (tag == OF_DT_BEGIN_NODE) {
829 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
830 tag = *((u32 *)(*p));
832 if (tag != OF_DT_END_NODE) {
833 printk("Weird tag at end of node: %x\n", tag);
840 static int __init early_parse_mem(char *p)
845 memory_limit = PAGE_ALIGN(memparse(p, &p));
846 DBG("memory limit = 0x%lx\n", memory_limit);
850 early_param("mem", early_parse_mem);
853 * The device tree may be allocated beyond our memory limit, or inside the
854 * crash kernel region for kdump. If so, move it out now.
856 static void move_device_tree(void)
858 unsigned long start, size;
861 DBG("-> move_device_tree\n");
863 start = __pa(initial_boot_params);
864 size = initial_boot_params->totalsize;
866 if ((memory_limit && (start + size) > memory_limit) ||
867 overlaps_crashkernel(start, size)) {
868 p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
869 memcpy(p, initial_boot_params, size);
870 initial_boot_params = (struct boot_param_header *)p;
871 DBG("Moved device tree to 0x%p\n", p);
874 DBG("<- move_device_tree\n");
878 * unflattens the device-tree passed by the firmware, creating the
879 * tree of struct device_node. It also fills the "name" and "type"
880 * pointers of the nodes so the normal device-tree walking functions
881 * can be used (this used to be done by finish_device_tree)
883 void __init unflatten_device_tree(void)
885 unsigned long start, mem, size;
886 struct device_node **allnextp = &allnodes;
888 DBG(" -> unflatten_device_tree()\n");
890 /* First pass, scan for size */
891 start = ((unsigned long)initial_boot_params) +
892 initial_boot_params->off_dt_struct;
893 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
894 size = (size | 3) + 1;
896 DBG(" size is %lx, allocating...\n", size);
898 /* Allocate memory for the expanded device tree */
899 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
900 mem = (unsigned long) __va(mem);
902 ((u32 *)mem)[size / 4] = 0xdeadbeef;
904 DBG(" unflattening %lx...\n", mem);
906 /* Second pass, do actual unflattening */
907 start = ((unsigned long)initial_boot_params) +
908 initial_boot_params->off_dt_struct;
909 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
910 if (*((u32 *)start) != OF_DT_END)
911 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
912 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
913 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
914 ((u32 *)mem)[size / 4] );
917 /* Get pointer to OF "/chosen" node for use everywhere */
918 of_chosen = of_find_node_by_path("/chosen");
919 if (of_chosen == NULL)
920 of_chosen = of_find_node_by_path("/chosen@0");
922 DBG(" <- unflatten_device_tree()\n");
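/*
 * Note on the two-pass scheme above: the first unflatten_dt_node() call only
 * measures the size needed, the expanded tree is then built into an lmb
 * allocation, and the 0xdeadbeef word written just past the end acts as a
 * canary so that an overrun by the second pass is at least reported.
 */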
926 * ibm,pa-features is a per-cpu property that contains a string of
927 * attribute descriptors, each of which has a 2 byte header plus up
928 * to 254 bytes worth of processor attribute bits. First header
929 * byte specifies the number of bytes following the header.
930 * Second header byte is an "attribute-specifier" type, of which
931 * zero is the only currently-defined value.
932 * Implementation: Pass in the byte and bit offset for the feature
933 * that we are interested in. The function will return -1 if the
934 * pa-features property is missing, or a 1/0 to indicate if the feature
935 * is supported/not supported. Note that the bit numbers are
936 * big-endian to match the definition in PAPR.
938 static struct ibm_pa_feature {
939 unsigned long cpu_features; /* CPU_FTR_xxx bit */
940 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
941 unsigned char pabyte; /* byte number in ibm,pa-features */
942 unsigned char pabit; /* bit number (big-endian) */
943 unsigned char invert; /* if 1, pa bit set => clear feature */
944 } ibm_pa_features[] __initdata = {
945 {0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
946 {0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
947 {CPU_FTR_SLB, 0, 0, 2, 0},
948 {CPU_FTR_CTRL, 0, 0, 3, 0},
949 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
950 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
952 /* put this back once we know how to test if firmware does 64k IO */
953 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
957 static void __init check_cpu_pa_features(unsigned long node)
959 unsigned char *pa_ftrs;
960 unsigned long len, tablelen, i, bit;
962 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
966 /* find descriptor with type == 0 */
970 len = 2 + pa_ftrs[0];
972 return; /* descriptor 0 not found */
979 /* loop over bits we know about */
980 for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
981 struct ibm_pa_feature *fp = &ibm_pa_features[i];
983 if (fp->pabyte >= pa_ftrs[0])
985 bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
986 if (bit ^ fp->invert) {
987 cur_cpu_spec->cpu_features |= fp->cpu_features;
988 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
990 cur_cpu_spec->cpu_features &= ~fp->cpu_features;
991 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
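/*
 * Worked example for the bit decode above (hypothetical property contents):
 * for the CPU_FTR_SLB entry with pabyte = 0 and pabit = 2, the code looks at
 * attribute byte pa_ftrs[2 + 0] and tests (byte >> (7 - 2)) & 1, i.e. bit 2
 * in the big-endian numbering used by PAPR. With invert = 0 a set bit turns
 * the feature on; with invert = 1 (e.g. CPU_FTR_NODSISRALIGN) a set bit
 * clears it.
 */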
996 static int __init early_init_dt_scan_cpus(unsigned long node,
997 const char *uname, int depth,
1000 static int logical_cpuid = 0;
1001 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1002 #ifdef CONFIG_ALTIVEC
1010 /* We are scanning "cpu" nodes only */
1011 if (type == NULL || strcmp(type, "cpu") != 0)
1014 /* Get physical cpuid */
1015 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
1017 nthreads = len / sizeof(int);
1019 intserv = of_get_flat_dt_prop(node, "reg", NULL);
1024 * Now see if any of these threads match our boot cpu.
1025 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
1027 for (i = 0; i < nthreads; i++) {
1029 * version 2 of the kexec param format adds the phys cpuid of
1032 if (initial_boot_params && initial_boot_params->version >= 2) {
1034 initial_boot_params->boot_cpuid_phys) {
1040 * Check if it's the boot-cpu and set its hw index now;
1041 * unfortunately this format did not support booting
1042 * off secondary threads.
1044 if (of_get_flat_dt_prop(node,
1045 "linux,boot-cpu", NULL) != NULL) {
1052 /* logical cpu id is always 0 on UP kernels */
1058 DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
1060 boot_cpuid = logical_cpuid;
1061 set_hard_smp_processor_id(boot_cpuid, intserv[i]);
1064 #ifdef CONFIG_ALTIVEC
1065 /* Check if we have a VMX and, if so, update CPU features */
1066 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1067 if (prop && (*prop) > 0) {
1068 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1069 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1072 /* Same goes for Apple's "altivec" property */
1073 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1075 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1076 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1078 #endif /* CONFIG_ALTIVEC */
1080 check_cpu_pa_features(node);
1082 #ifdef CONFIG_PPC_PSERIES
1084 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1086 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1092 static int __init early_init_dt_scan_chosen(unsigned long node,
1093 const char *uname, int depth, void *data)
1095 unsigned long *lprop;
1099 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1102 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1106 /* check if iommu is forced on or off */
1107 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1109 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1113 /* mem=x on the command line is the preferred mechanism */
1114 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1116 memory_limit = *lprop;
1119 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1121 tce_alloc_start = *lprop;
1122 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1124 tce_alloc_end = *lprop;
1127 #ifdef CONFIG_PPC_RTAS
1128 /* To help early debugging via the front panel, we retrieve a minimal
1129 * set of RTAS info now if available
1132 u64 *basep, *entryp, *sizep;
1134 basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1135 entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1136 sizep = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
1137 if (basep && entryp && sizep) {
1139 rtas.entry = *entryp;
1143 #endif /* CONFIG_PPC_RTAS */
1146 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
1148 crashk_res.start = *lprop;
1150 lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
1152 crashk_res.end = crashk_res.start + *lprop - 1;
1155 /* Retrieve command line */
1156 p = of_get_flat_dt_prop(node, "bootargs", &l);
1157 if (p != NULL && l > 0)
1158 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
1160 #ifdef CONFIG_CMDLINE
1161 if (l == 0 || (l == 1 && (*p) == 0))
1162 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1163 #endif /* CONFIG_CMDLINE */
1165 DBG("Command line is: %s\n", cmd_line);
1171 static int __init early_init_dt_scan_root(unsigned long node,
1172 const char *uname, int depth, void *data)
1179 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1180 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1181 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1183 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1184 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1185 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1191 static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1196 /* Ignore more than 2 cells */
1197 while (s > sizeof(unsigned long) / 4) {
1215 static int __init early_init_dt_scan_memory(unsigned long node,
1216 const char *uname, int depth, void *data)
1218 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1222 /* We are scanning "memory" nodes only */
1225 * The longtrail doesn't have a device_type on the
1226 * /memory node, so look for the node called /memory@0.
1228 if (depth != 1 || strcmp(uname, "memory@0") != 0)
1230 } else if (strcmp(type, "memory") != 0)
1233 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1235 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1239 endp = reg + (l / sizeof(cell_t));
1241 DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
1242 uname, l, reg[0], reg[1], reg[2], reg[3]);
1244 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1245 unsigned long base, size;
1247 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1248 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1252 DBG(" - %lx , %lx\n", base, size);
1255 if (base >= 0x80000000ul)
1257 if ((base + size) > 0x80000000ul)
1258 size = 0x80000000ul - base;
1261 lmb_add(base, size);
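/*
 * Example decode (hypothetical reg contents): with dt_root_addr_cells = 2
 * and dt_root_size_cells = 1, a reg entry of <0x0 0x20000000 0x10000000>
 * yields base = 0x20000000 and size = 0x10000000; dt_mem_next_cell() folds
 * up to two cells into one unsigned long and advances the cell pointer past
 * each field.
 */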
1266 static void __init early_reserve_mem(void)
1270 unsigned long self_base;
1271 unsigned long self_size;
1273 reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
1274 initial_boot_params->off_mem_rsvmap);
1276 /* before we do anything, let's reserve the dt blob */
1277 self_base = __pa((unsigned long)initial_boot_params);
1278 self_size = initial_boot_params->totalsize;
1279 lmb_reserve(self_base, self_size);
1283 * Handle the case where we might be booting from an old kexec
1284 * image that set up the mem_rsvmap as pairs of 32-bit values
1286 if (*reserve_map > 0xffffffffull) {
1287 u32 base_32, size_32;
1288 u32 *reserve_map_32 = (u32 *)reserve_map;
1291 base_32 = *(reserve_map_32++);
1292 size_32 = *(reserve_map_32++);
1295 /* skip if the reservation is for the blob */
1296 if (base_32 == self_base && size_32 == self_size)
1298 DBG("reserving: %x -> %x\n", base_32, size_32);
1299 lmb_reserve(base_32, size_32);
1305 base = *(reserve_map++);
1306 size = *(reserve_map++);
1309 /* skip if the reservation is for the blob */
1310 if (base == self_base && size == self_size)
1312 DBG("reserving: %llx -> %llx\n", base, size);
1313 lmb_reserve(base, size);
1317 DBG("memory reserved, lmbs:\n");
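/*
 * For reference, the reserve map is a list of (base, size) pairs which, per
 * the flattened-tree convention, ends with a zero entry (hypothetical
 * contents shown):
 *
 *	0x0000000001c00000 0x0000000000014000	// e.g. the dt blob itself
 *	0x0000000000000000 0x0000000000000000	// terminator
 *
 * The 32-bit pair handling above only matters for old kexec images.
 */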
1322 void __init early_init_devtree(void *params)
1324 DBG(" -> early_init_devtree()\n");
1326 /* Setup flat device-tree pointer */
1327 initial_boot_params = params;
1329 /* Retrieve various information from the /chosen node of the
1330 * device-tree, including the platform type, initrd location and
1331 * size, TCE reserve, and more ...
1333 of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1335 /* Scan memory nodes and rebuild LMBs */
1337 of_scan_flat_dt(early_init_dt_scan_root, NULL);
1338 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1340 /* Save command line for /proc/cmdline and then parse parameters */
1341 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
1342 parse_early_param();
1344 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1345 lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
1346 reserve_kdump_trampoline();
1347 reserve_crashkernel();
1348 early_reserve_mem();
1350 lmb_enforce_memory_limit(memory_limit);
1353 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1355 /* We may need to relocate the flat tree, do it now.
1356 * FIXME .. and the initrd too? */
1359 DBG("Scanning CPUs ...\n");
1361 /* Retrieve CPU-related information from the flat tree
1362 * (altivec support, boot CPU ID, ...)
1364 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1366 DBG(" <- early_init_devtree()\n");
1372 prom_n_addr_cells(struct device_node* np)
1378 ip = (int *) get_property(np, "#address-cells", NULL);
1381 } while (np->parent);
1382 /* No #address-cells property for the root node, default to 1 */
1385 EXPORT_SYMBOL(prom_n_addr_cells);
1388 prom_n_size_cells(struct device_node* np)
1394 ip = (int *) get_property(np, "#size-cells", NULL);
1397 } while (np->parent);
1398 /* No #size-cells property for the root node, default to 1 */
1401 EXPORT_SYMBOL(prom_n_size_cells);
1404 * Work out the sense (active-low level / active-high edge)
1405 * of each interrupt from the device tree.
1407 void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1409 struct device_node *np;
1412 /* default to level-triggered */
1413 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1415 for (np = allnodes; np != 0; np = np->allnext) {
1416 for (j = 0; j < np->n_intrs; j++) {
1417 i = np->intrs[j].line;
1418 if (i >= off && i < max)
1419 senses[i-off] = np->intrs[j].sense;
1425 * Construct and return a list of the device_nodes with a given name.
1427 struct device_node *find_devices(const char *name)
1429 struct device_node *head, **prevp, *np;
1432 for (np = allnodes; np != 0; np = np->allnext) {
1433 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1441 EXPORT_SYMBOL(find_devices);
1444 * Construct and return a list of the device_nodes with a given type.
1446 struct device_node *find_type_devices(const char *type)
1448 struct device_node *head, **prevp, *np;
1451 for (np = allnodes; np != 0; np = np->allnext) {
1452 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1460 EXPORT_SYMBOL(find_type_devices);
1463 * Returns all nodes linked together
1465 struct device_node *find_all_nodes(void)
1467 struct device_node *head, **prevp, *np;
1470 for (np = allnodes; np != 0; np = np->allnext) {
1477 EXPORT_SYMBOL(find_all_nodes);
1479 /** Checks if the given "compat" string matches one of the strings in
1480 * the device's "compatible" property
1482 int device_is_compatible(struct device_node *device, const char *compat)
1487 cp = (char *) get_property(device, "compatible", &cplen);
1491 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1500 EXPORT_SYMBOL(device_is_compatible);
1504 * Indicates whether the root node has a given value in its
1505 * compatible property.
1507 int machine_is_compatible(const char *compat)
1509 struct device_node *root;
1512 root = of_find_node_by_path("/");
1514 rc = device_is_compatible(root, compat);
1519 EXPORT_SYMBOL(machine_is_compatible);
1522 * Construct and return a list of the device_nodes with a given type
1523 * and compatible property.
1525 struct device_node *find_compatible_devices(const char *type,
1528 struct device_node *head, **prevp, *np;
1531 for (np = allnodes; np != 0; np = np->allnext) {
1533 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1535 if (device_is_compatible(np, compat)) {
1543 EXPORT_SYMBOL(find_compatible_devices);
1546 * Find the device_node with a given full_name.
1548 struct device_node *find_path_device(const char *path)
1550 struct device_node *np;
1552 for (np = allnodes; np != 0; np = np->allnext)
1553 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1557 EXPORT_SYMBOL(find_path_device);
1561 * New implementation of the OF "find" APIs; they return a refcounted
1562 * object. Call of_node_put() when done. The device tree and list
1563 * are protected by a rw_lock.
1565 * Note that property management will need some locking as well;
1566 * this isn't dealt with yet.
1571 * of_find_node_by_name - Find a node by its "name" property
1572 * @from: The node to start searching from or NULL, the node
1573 * you pass will not be searched, only the next one
1574 * will; typically, you pass what the previous call
1575 * returned. of_node_put() will be called on it
1576 * @name: The name string to match against
1578 * Returns a node pointer with refcount incremented, use
1579 * of_node_put() on it when done.
1581 struct device_node *of_find_node_by_name(struct device_node *from,
1584 struct device_node *np;
1586 read_lock(&devtree_lock);
1587 np = from ? from->allnext : allnodes;
1588 for (; np != NULL; np = np->allnext)
1589 if (np->name != NULL && strcasecmp(np->name, name) == 0
1594 read_unlock(&devtree_lock);
1597 EXPORT_SYMBOL(of_find_node_by_name);
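/*
 * Usage sketch for the refcounted find API (illustrative only; the reference
 * on the node passed as @from is dropped by the iterator, as documented
 * above):
 *
 *	struct device_node *np = NULL;
 *	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
 *		// ... use np; on an early break, call of_node_put(np) ...
 *	}
 */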
1600 * of_find_node_by_type - Find a node by its "device_type" property
1601 * @from: The node to start searching from or NULL, the node
1602 * you pass will not be searched, only the next one
1603 * will; typically, you pass what the previous call
1604 * returned. of_node_put() will be called on it
1605 * @type: The type string to match against
1607 * Returns a node pointer with refcount incremented, use
1608 * of_node_put() on it when done.
1610 struct device_node *of_find_node_by_type(struct device_node *from,
1613 struct device_node *np;
1615 read_lock(&devtree_lock);
1616 np = from ? from->allnext : allnodes;
1617 for (; np != 0; np = np->allnext)
1618 if (np->type != 0 && strcasecmp(np->type, type) == 0
1623 read_unlock(&devtree_lock);
1626 EXPORT_SYMBOL(of_find_node_by_type);
1629 * of_find_compatible_node - Find a node based on type and one of the
1630 * tokens in its "compatible" property
1631 * @from: The node to start searching from or NULL, the node
1632 * you pass will not be searched, only the next one
1633 * will; typically, you pass what the previous call
1634 * returned. of_node_put() will be called on it
1635 * @type: The type string to match "device_type" or NULL to ignore
1636 * @compatible: The string to match to one of the tokens in the device
1637 * "compatible" list.
1639 * Returns a node pointer with refcount incremented, use
1640 * of_node_put() on it when done.
1642 struct device_node *of_find_compatible_node(struct device_node *from,
1643 const char *type, const char *compatible)
1645 struct device_node *np;
1647 read_lock(&devtree_lock);
1648 np = from ? from->allnext : allnodes;
1649 for (; np != 0; np = np->allnext) {
1651 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1653 if (device_is_compatible(np, compatible) && of_node_get(np))
1658 read_unlock(&devtree_lock);
1661 EXPORT_SYMBOL(of_find_compatible_node);
1664 * of_find_node_by_path - Find a node matching a full OF path
1665 * @path: The full path to match
1667 * Returns a node pointer with refcount incremented, use
1668 * of_node_put() on it when done.
1670 struct device_node *of_find_node_by_path(const char *path)
1672 struct device_node *np = allnodes;
1674 read_lock(&devtree_lock);
1675 for (; np != 0; np = np->allnext) {
1676 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1680 read_unlock(&devtree_lock);
1683 EXPORT_SYMBOL(of_find_node_by_path);
1686 * of_find_node_by_phandle - Find a node given a phandle
1687 * @handle: phandle of the node to find
1689 * Returns a node pointer with refcount incremented, use
1690 * of_node_put() on it when done.
1692 struct device_node *of_find_node_by_phandle(phandle handle)
1694 struct device_node *np;
1696 read_lock(&devtree_lock);
1697 for (np = allnodes; np != 0; np = np->allnext)
1698 if (np->linux_phandle == handle)
1702 read_unlock(&devtree_lock);
1705 EXPORT_SYMBOL(of_find_node_by_phandle);
1708 * of_find_all_nodes - Get next node in global list
1709 * @prev: Previous node or NULL to start iteration
1710 * of_node_put() will be called on it
1712 * Returns a node pointer with refcount incremented, use
1713 * of_node_put() on it when done.
1715 struct device_node *of_find_all_nodes(struct device_node *prev)
1717 struct device_node *np;
1719 read_lock(&devtree_lock);
1720 np = prev ? prev->allnext : allnodes;
1721 for (; np != 0; np = np->allnext)
1722 if (of_node_get(np))
1726 read_unlock(&devtree_lock);
1729 EXPORT_SYMBOL(of_find_all_nodes);
1732 * of_get_parent - Get a node's parent if any
1733 * @node: Node to get parent
1735 * Returns a node pointer with refcount incremented, use
1736 * of_node_put() on it when done.
1738 struct device_node *of_get_parent(const struct device_node *node)
1740 struct device_node *np;
1745 read_lock(&devtree_lock);
1746 np = of_node_get(node->parent);
1747 read_unlock(&devtree_lock);
1750 EXPORT_SYMBOL(of_get_parent);
1753 * of_get_next_child - Iterate over a node's children
1754 * @node: parent node
1755 * @prev: previous child of the parent node, or NULL to get first
1757 * Returns a node pointer with refcount incremented, use
1758 * of_node_put() on it when done.
1760 struct device_node *of_get_next_child(const struct device_node *node,
1761 struct device_node *prev)
1763 struct device_node *next;
1765 read_lock(&devtree_lock);
1766 next = prev ? prev->sibling : node->child;
1767 for (; next != 0; next = next->sibling)
1768 if (of_node_get(next))
1772 read_unlock(&devtree_lock);
1775 EXPORT_SYMBOL(of_get_next_child);
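/*
 * Child-iteration sketch (illustrative; per the convention above, the
 * reference on @prev is dropped for the caller):
 *
 *	struct device_node *child = NULL;
 *	while ((child = of_get_next_child(parent, child)) != NULL) {
 *		// ... use child; put it explicitly only on early exit ...
 *	}
 */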
1778 * of_node_get - Increment refcount of a node
1779 * @node: Node to inc refcount, NULL is supported to
1780 * simplify writing of callers
1784 struct device_node *of_node_get(struct device_node *node)
1787 kref_get(&node->kref);
1790 EXPORT_SYMBOL(of_node_get);
1792 static inline struct device_node * kref_to_device_node(struct kref *kref)
1794 return container_of(kref, struct device_node, kref);
1798 * of_node_release - release a dynamically allocated node
1799 * @kref: kref element of the node to be released
1801 * In of_node_put() this function is passed to kref_put()
1802 * as the destructor.
1804 static void of_node_release(struct kref *kref)
1806 struct device_node *node = kref_to_device_node(kref);
1807 struct property *prop = node->properties;
1809 if (!OF_IS_DYNAMIC(node))
1812 struct property *next = prop->next;
1819 prop = node->deadprops;
1820 node->deadprops = NULL;
1824 kfree(node->full_name);
1830 * of_node_put - Decrement refcount of a node
1831 * @node: Node to dec refcount, NULL is supported to
1832 * simplify writing of callers
1835 void of_node_put(struct device_node *node)
1838 kref_put(&node->kref, of_node_release);
1840 EXPORT_SYMBOL(of_node_put);
1843 * Plug a device node into the tree and global list.
1845 void of_attach_node(struct device_node *np)
1847 write_lock(&devtree_lock);
1848 np->sibling = np->parent->child;
1849 np->allnext = allnodes;
1850 np->parent->child = np;
1852 write_unlock(&devtree_lock);
1856 * "Unplug" a node from the device tree. The caller must hold
1857 * a reference to the node. The memory associated with the node
1858 * is not freed until its refcount goes to zero.
1860 void of_detach_node(const struct device_node *np)
1862 struct device_node *parent;
1864 write_lock(&devtree_lock);
1866 parent = np->parent;
1869 allnodes = np->allnext;
1871 struct device_node *prev;
1872 for (prev = allnodes;
1873 prev->allnext != np;
1874 prev = prev->allnext)
1876 prev->allnext = np->allnext;
1879 if (parent->child == np)
1880 parent->child = np->sibling;
1882 struct device_node *prevsib;
1883 for (prevsib = np->parent->child;
1884 prevsib->sibling != np;
1885 prevsib = prevsib->sibling)
1887 prevsib->sibling = np->sibling;
1890 write_unlock(&devtree_lock);
1893 #ifdef CONFIG_PPC_PSERIES
1895 * Fix up the uninitialized fields in a new device node:
1896 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1898 * A lot of boot-time code is duplicated here, because functions such
1899 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1902 * This should probably be split up into smaller chunks.
1905 static int of_finish_dynamic_node(struct device_node *node)
1907 struct device_node *parent = of_get_parent(node);
1909 phandle *ibm_phandle;
1911 node->name = get_property(node, "name", NULL);
1912 node->type = get_property(node, "device_type", NULL);
1919 /* We don't support that function on PowerMac, at least
1922 if (machine_is(powermac))
1925 /* fix up new node's linux_phandle field */
1926 if ((ibm_phandle = (unsigned int *)get_property(node,
1927 "ibm,phandle", NULL)))
1928 node->linux_phandle = *ibm_phandle;
1931 of_node_put(parent);
1935 static int prom_reconfig_notifier(struct notifier_block *nb,
1936 unsigned long action, void *node)
1941 case PSERIES_RECONFIG_ADD:
1942 err = of_finish_dynamic_node(node);
1944 finish_node(node, NULL, 0);
1946 printk(KERN_ERR "finish_node returned %d\n", err);
1957 static struct notifier_block prom_reconfig_nb = {
1958 .notifier_call = prom_reconfig_notifier,
1959 .priority = 10, /* This one needs to run first */
1962 static int __init prom_reconfig_setup(void)
1964 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1966 __initcall(prom_reconfig_setup);
1969 struct property *of_find_property(struct device_node *np, const char *name,
1972 struct property *pp;
1974 read_lock(&devtree_lock);
1975 for (pp = np->properties; pp != 0; pp = pp->next)
1976 if (strcmp(pp->name, name) == 0) {
1981 read_unlock(&devtree_lock);
1987 * Find a property with a given name for a given node
1988 * and return the value.
1990 unsigned char *get_property(struct device_node *np, const char *name,
1993 struct property *pp = of_find_property(np,name,lenp);
1994 return pp ? pp->value : NULL;
1996 EXPORT_SYMBOL(get_property);
1999 * Add a property to a node
2001 int prom_add_property(struct device_node* np, struct property* prop)
2003 struct property **next;
2006 write_lock(&devtree_lock);
2007 next = &np->properties;
2009 if (strcmp(prop->name, (*next)->name) == 0) {
2010 /* duplicate! don't insert it */
2011 write_unlock(&devtree_lock);
2014 next = &(*next)->next;
2017 write_unlock(&devtree_lock);
2019 #ifdef CONFIG_PROC_DEVICETREE
2020 /* try to add to proc as well if it was initialized */
2022 proc_device_tree_add_prop(np->pde, prop);
2023 #endif /* CONFIG_PROC_DEVICETREE */
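/*
 * Example use of prom_add_property() (a sketch with a made-up property name;
 * the struct property fields assumed here are name, length and value):
 *
 *	static struct property example_prop = {
 *		.name	= "linux,example",
 *		.length	= 4,
 *		.value	= "foo",
 *	};
 *	prom_add_property(np, &example_prop);
 */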
2029 * Remove a property from a node. Note that we don't actually
2030 * remove it, since we have given out who-knows-how-many pointers
2031 * to the data using get-property. Instead we just move the property
2032 * to the "dead properties" list, so it won't be found any more.
2034 int prom_remove_property(struct device_node *np, struct property *prop)
2036 struct property **next;
2039 write_lock(&devtree_lock);
2040 next = &np->properties;
2042 if (*next == prop) {
2043 /* found the node */
2045 prop->next = np->deadprops;
2046 np->deadprops = prop;
2050 next = &(*next)->next;
2052 write_unlock(&devtree_lock);
2057 #ifdef CONFIG_PROC_DEVICETREE
2058 /* try to remove the proc node as well */
2060 proc_device_tree_remove_prop(np->pde, prop);
2061 #endif /* CONFIG_PROC_DEVICETREE */
2067 * Update a property in a node. Note that we don't actually
2068 * remove it, since we have given out who-knows-how-many pointers
2069 * to the data using get-property. Instead we just move the property
2070 * to the "dead properties" list, and add the new property to the
2073 int prom_update_property(struct device_node *np,
2074 struct property *newprop,
2075 struct property *oldprop)
2077 struct property **next;
2080 write_lock(&devtree_lock);
2081 next = &np->properties;
2083 if (*next == oldprop) {
2084 /* found the node */
2085 newprop->next = oldprop->next;
2087 oldprop->next = np->deadprops;
2088 np->deadprops = oldprop;
2092 next = &(*next)->next;
2094 write_unlock(&devtree_lock);
2099 #ifdef CONFIG_PROC_DEVICETREE
2100 /* try to add to proc as well if it was initialized */
2102 proc_device_tree_update_prop(np->pde, newprop, oldprop);
2103 #endif /* CONFIG_PROC_DEVICETREE */
2109 /* Find the device node for a given logical cpu number; also returns the
2110 * cpu-local thread number (index in ibm,ppc-interrupt-server#s) if relevant
2111 * and asked for (non NULL)
2113 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
2116 struct device_node *np;
2118 hardid = get_hard_smp_processor_id(cpu);
2120 for_each_node_by_type(np, "cpu") {
2122 unsigned int plen, t;
2124 /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
2125 * fall back to the "reg" property and assume no threads
2127 intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s",
2129 if (intserv == NULL) {
2130 u32 *reg = (u32 *)get_property(np, "reg", NULL);
2133 if (*reg == hardid) {
2139 plen /= sizeof(u32);
2140 for (t = 0; t < plen; t++) {
2141 if (hardid == intserv[t]) {