/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/lmb.h>

#include <asm/prom.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

typedef u32 cell_t;

/* export that to outside world */
struct device_node *of_chosen;
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	const char *cp;
	unsigned long cplen, l;

	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}
	return 0;
}
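
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical of_scan_flat_dt() callback that bails out unless the node
 * claims a given compatible string.  "vendor,some-device" is a made-up
 * example value.
 *
 *	static int __init my_scan_cb(unsigned long node, const char *uname,
 *				     int depth, void *data)
 *	{
 *		if (!of_flat_dt_is_compatible(node, "vendor,some-device"))
 *			return 0;		-- keep scanning
 *		-- read properties with of_get_flat_dt_prop(node, ...) here
 *		return 1;			-- stop the scan
 *	}
 */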
static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}
static unsigned long __init unflatten_dt_node(unsigned long mem,
					unsigned long *p,
					struct device_node *dad,
					struct device_node ***allnextpp,
					unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}

	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char *)np) + sizeof(struct device_node);
		if (new_format) {
			char *p2 = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p2, dad->full_name);
#ifdef DEBUG
				if ((strlen(p2) + l + 1) != allocl) {
					pr_debug("%s: p: %d, l: %d, a: %d\n",
						pathp, (int)strlen(p2),
						l, allocl);
				}
#endif
				p2 += strlen(p2);
			}
			*(p2++) = '/';
			memcpy(p2, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == NULL)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	while (1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk(KERN_INFO
				"Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p1 = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p1) {
			if ((*p1) == '@')
				pa = p1;
			if ((*p1) == '/')
				ps = p1 + 1;
			p1++;
		}
		if (pa < ps)
			pa = p1;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = pp + 1;
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n", pathp,
				(char *)pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = of_get_property(np, "name", NULL);
		np->type = of_get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk(KERN_INFO "Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}
/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree)
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;

	pr_debug(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	size = (size | 3) + 1;

	pr_debug(" size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	mem = (unsigned long) __va(mem);

	((u32 *)mem)[size / 4] = 0xdeadbeef;

	pr_debug(" unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n",
			*((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
			((u32 *)mem)[size / 4]);
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	pr_debug(" <- unflatten_device_tree()\n");
}
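
/*
 * Usage sketch (illustrative only): once unflatten_device_tree() has run,
 * the usual walking helpers work.  A hypothetical caller reading the root
 * node's "model" property might look like:
 *
 *	struct device_node *root = of_find_node_by_path("/");
 *	if (root) {
 *		const char *model = of_get_property(root, "model", NULL);
 *		if (model)
 *			pr_info("model: %s\n", model);
 *		of_node_put(root);
 *	}
 */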
#define early_init_dt_scan_drconf_memory(node) 0

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	static int logical_cpuid;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *intserv;
	int i, nthreads;
	int found = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "reg", NULL);
	nthreads = 1;

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (initial_boot_params && initial_boot_params->version >= 2) {
			if (intserv[i] ==
					initial_boot_params->boot_cpuid_phys) {
				found = 1;
				break;
			}
		} else {
			/*
			 * Check if it's the boot-cpu, set its hw index now,
			 * unfortunately this format did not support booting
			 * off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
				found = 1;
				break;
			}
		}

#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		logical_cpuid++;
#endif
	}

	if (found) {
		pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
			intserv[i]);
		boot_cpuid = logical_cpuid;
	}

	return 0;
}
#ifdef CONFIG_BLK_DEV_INITRD
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	unsigned long l;
	u32 *prop;

	pr_debug("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
	if (prop) {
		initrd_start = (unsigned long)
				__va((u32)of_read_ulong(prop, l/4));

		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
		if (prop) {
			initrd_end = (unsigned long)
					__va((u32)of_read_ulong(prop, l/4));
			initrd_below_start_ok = 1;
		} else {
			initrd_start = 0;
		}
	}

	pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n",
					initrd_start, initrd_end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
static int __init early_init_dt_scan_chosen(unsigned long node,
				const char *uname, int depth, void *data)
{
	unsigned long l;
	char *p;
#ifdef CONFIG_KEXEC
	u64 *lprop;
#endif

	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
		(strcmp(uname, "chosen") != 0 &&
				strcmp(uname, "chosen@0") != 0))
		return 0;

#ifdef CONFIG_KEXEC
	lprop = (u64 *)of_get_flat_dt_prop(node,
				"linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = (u64 *)of_get_flat_dt_prop(node,
				"linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	early_init_dt_check_for_initrd(node);

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", cmd_line);

	/* break now */
	return 1;
}
static int __init early_init_dt_scan_root(unsigned long node,
				const char *uname, int depth, void *data)
{
	u32 *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;

	*cellp = p + s;
	return of_read_number(p, s);
}
static int __init early_init_dt_scan_memory(unsigned long node,
				const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
/*	if (depth == 1 &&
		strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);
*/
	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
		uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		pr_debug(" - %llx , %llx\n", (unsigned long long)base,
			(unsigned long long)size);

		lmb_add(base, size);
	}
	return 0;
}
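
/*
 * Illustrative flat-tree input (made-up values): a node such as
 *
 *	memory@0 {
 *		device_type = "memory";
 *		reg = <0x00000000 0x08000000>;
 *	};
 *
 * with one address cell and one size cell results in a single
 * lmb_add(0x0, 0x08000000) call, i.e. 128 MB of RAM registered at 0.
 */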
#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
 *
 * Function to find the largest size we need to reserve
 * during early boot process.
 *
 * It either looks for a boot param and returns that OR
 * returns the larger of 256MB or 5% of RAM rounded down to
 * a multiple of 256MB.
 *
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of value */
	tmp = lmb_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down in multiples of 256 */
	tmp = tmp & ~0x0FFFFFFFUL;

	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}
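
/*
 * Worked example (illustrative only): on a machine with 8 GB of RAM,
 * lmb_end_of_DRAM() / 20 is roughly 410 MB; masking with ~0x0FFFFFFF rounds
 * that down to 256 MB, which is then compared against PHYP_DUMP_RMR_END and
 * the larger of the two is returned for reservation.
 */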
/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in last
 * boot instance or if the hardware is supported and the
 * scratch area needs to be setup. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above RMR. Area freed by userland tools */
		base = variable_reserve_size;
		size = lmb_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		lmb_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = lmb_end_of_DRAM() - size;
		lmb_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */
#ifdef CONFIG_EARLY_PRINTK
/* MS this is a Microblaze specific function */
static int __init early_init_dt_scan_serial(unsigned long node,
				const char *uname, int depth, void *data)
{
	unsigned long l;
	char *p;
	int *addr;

	pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);

	/* find all serial nodes */
	if (strncmp(uname, "serial", 6) != 0)
		return 0;

	early_init_dt_check_for_initrd(node);

	/* find compatible node with uartlite */
	p = of_get_flat_dt_prop(node, "compatible", &l);
	if (p == NULL)
		return 0;
	if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
			(strncmp(p, "xlnx,opb-uartlite", 17) != 0))
		return 0;

	addr = of_get_flat_dt_prop(node, "reg", &l);
	return *addr; /* return address */
}

/* this function is looking for early uartlite console - Microblaze specific */
int __init early_uartlite_console(void)
{
	return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
}
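
/*
 * Usage sketch (illustrative only): a hypothetical early-console setup path
 * could probe for the uartlite base address like this; a return value of 0
 * means no matching serial node was found.
 *
 *	int base = early_uartlite_console();
 *	if (base)
 *		setup_early_uartlite(base);	-- made-up helper name
 */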
#endif /* CONFIG_EARLY_PRINTK */
void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if a dump occurred during last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	lmb_analyze();

	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());

	pr_debug("Scanning CPUs ...\n");

	/* Retrieve CPU related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	pr_debug(" <- early_init_devtree()\n");
}
/**
 * Indicates whether the root node has a given value in its
 * compatible property.
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(machine_is_compatible);
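
/*
 * Usage sketch (illustrative only): platform code can branch on the root
 * compatible string; "xlnx,microblaze" is just an example value here.
 *
 *	if (machine_is_compatible("xlnx,microblaze"))
 *		setup_board_specific_bits();	-- made-up helper name
 */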
/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done. The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	for (np = allnodes; np != NULL; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	of_node_get(np);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
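
/*
 * Usage sketch (illustrative only): resolve a phandle read from some
 * property and drop the reference when done.  `ph' is assumed to hold a
 * valid phandle value obtained elsewhere.
 *
 *	struct device_node *np = of_find_node_by_phandle(ph);
 *	if (np) {
 *		pr_debug("resolved phandle to %s\n", np->full_name);
 *		of_node_put(np);
 *	}
 */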
/**
 * of_node_get - Increment refcount of a node
 * @node: Node to inc refcount, NULL is supported to
 *	simplify writing of callers
 *
 * Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
			node->full_name);
		dump_stack();
		kref_init(&node->kref);
		return;
	}

	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 * of_node_put - Decrement refcount of a node
 * @node: Node to dec refcount, NULL is supported to
 *	simplify writing of callers
 *
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);
/*
 * Plug a device node into the tree and global list.
 */
void of_attach_node(struct device_node *np)
{
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock_irqrestore(&devtree_lock, flags);
}
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 */
void of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);

	parent = np->parent;
	if (!parent)
		goto out_unlock;

	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	of_node_set_flag(np, OF_DETACHED);

out_unlock:
	write_unlock_irqrestore(&devtree_lock, flags);
}
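
/*
 * Usage sketch (illustrative only): the expected teardown order for a
 * dynamically added node is detach first, then drop the last reference so
 * that of_node_release() sees the OF_DETACHED flag.
 *
 *	of_detach_node(np);
 *	of_node_put(np);	-- may free np if this was the last reference
 */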
/*
 * Add a property to a node
 */
int prom_add_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;

	prop->next = NULL;
	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			write_unlock_irqrestore(&devtree_lock, flags);
			return -1;
		}
		next = &(*next)->next;
	}
	*next = prop;
	write_unlock_irqrestore(&devtree_lock, flags);

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_add_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
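
/*
 * Usage sketch (illustrative only, error handling omitted): building a
 * property and attaching it to a node.  The name/value strings are made-up
 * examples; note that prom_add_property() returns -1 on a duplicate name.
 *
 *	struct property *prop = kzalloc(sizeof(*prop), GFP_KERNEL);
 *	prop->name = "status";
 *	prop->value = "disabled";
 *	prop->length = sizeof("disabled");
 *	if (prom_add_property(np, prop))
 *		kfree(prop);
 */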
/*
 * Remove a property from a node. Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property. Instead we just move the property
 * to the "dead properties" list, so it won't be found any more.
 */
int prom_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;

	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == prop) {
			/* found the node */
			*next = prop->next;
			prop->next = np->deadprops;
			np->deadprops = prop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to remove the proc node as well */
	if (np->pde)
		proc_device_tree_remove_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
/*
 * Update a property in a node. Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property. Instead we just move the property
 * to the "dead properties" list, and add the new property to the
 * property list
 */
int prom_update_property(struct device_node *np,
			 struct property *newprop,
			 struct property *oldprop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;

	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == oldprop) {
			/* found the node */
			newprop->next = oldprop->next;
			*next = newprop;
			oldprop->next = np->deadprops;
			np->deadprops = oldprop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_update_prop(np->pde, newprop, oldprop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
	struct dentry *d;

	flat_dt_blob.data = initial_boot_params;
	flat_dt_blob.size = initial_boot_params->totalsize;

	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
				of_debugfs_root, &flat_dt_blob);
	if (!d)
		return 1;

	return 0;
}
device_initcall(export_flat_device_tree);
#endif