prom.c

/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/lmb.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>

static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

typedef u32 cell_t;

static struct boot_param_header *initial_boot_params;

/* export that to outside world */
struct device_node *of_chosen;

static inline char *find_flat_dt_string(u32 offset)
{
	return ((char *)initial_boot_params) +
		initial_boot_params->off_dt_strings + offset;
}

/**
 * This function is used to scan the flattened device-tree; it is
 * used to extract the memory information at boot before we can
 * unflatten the tree.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth--;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			u32 sz = *((u32 *)p);
			p += 8;
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
				" device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np + 1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while (1);

	return rc;
}
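
/*
 * Illustrative sketch (not in the original source): a callback of the shape
 * expected by of_scan_flat_dt().  The node handle it receives can be passed
 * straight to of_get_flat_dt_prop(); the "example,baud" property name below
 * is hypothetical.
 *
 *	static int __init example_scan_cb(unsigned long node, const char *uname,
 *					  int depth, void *data)
 *	{
 *		unsigned long len;
 *		u32 *prop = of_get_flat_dt_prop(node, "example,baud", &len);
 *
 *		if (prop != NULL && len >= 4)
 *			*(u32 *)data = *prop;	// record the first cell
 *		return 0;			// 0 means "keep scanning"
 *	}
 *
 *	// usage: u32 baud = 0; of_scan_flat_dt(example_scan_cb, &baud);
 */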

unsigned long __init of_get_flat_dt_root(void)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;

	while (*((u32 *)p) == OF_DT_NOP)
		p += 4;
	BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE);
	p += 4;
	return _ALIGN(p + strlen((char *)p) + 1, 4);
}

/**
 * This function can be used within an of_scan_flat_dt() callback to get
 * access to properties.
 */
void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
				" name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while (1);
}
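
/*
 * Illustrative sketch (not in the original source): the pointer returned by
 * of_get_flat_dt_prop() aims straight into the flat blob and *size is the
 * property length in bytes, so multi-cell values can be read as an array of
 * u32 cells, exactly as the scanners in this file do.  The "example,ranges"
 * property name is hypothetical.
 *
 *	unsigned long len;
 *	u32 *cells = of_get_flat_dt_prop(node, "example,ranges", &len);
 *	int i;
 *
 *	if (cells != NULL)
 *		for (i = 0; i < len / 4; i++)
 *			pr_debug("cell %d = %x\n", i, cells[i]);
 */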

int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	const char *cp;
	unsigned long cplen, l;

	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}
	return 0;
}
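
/*
 * Illustrative sketch (not in the original source): of_flat_dt_is_compatible()
 * walks the NUL-separated strings of the "compatible" property, so a scan
 * callback can select nodes by binding without parsing the property itself.
 * The "xlnx,xps-intc-1.00.a" string is only an example binding.
 *
 *	static int __init example_intc_cb(unsigned long node, const char *uname,
 *					  int depth, void *data)
 *	{
 *		if (!of_flat_dt_is_compatible(node, "xlnx,xps-intc-1.00.a"))
 *			return 0;	// not our node, keep scanning
 *		// ... pick up "reg"/"interrupts" here ...
 *		return 1;		// non-zero stops of_scan_flat_dt()
 *	}
 */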

static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}

static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}

	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char *)np) + sizeof(struct device_node);
		if (new_format) {
			char *p2 = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p2, dad->full_name);
#ifdef DEBUG
				if ((strlen(p2) + l + 1) != allocl) {
					pr_debug("%s: p: %d, l: %d, a: %d\n",
						pathp, (int)strlen(p2),
						l, allocl);
				}
#endif
				p2 += strlen(p2);
			}
			*(p2++) = '/';
			memcpy(p2, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child' */
			if (dad->next == NULL)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}

	while (1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk(KERN_INFO
				"Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}

	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p1 = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p1) {
			if ((*p1) == '@')
				pa = p1;
			if ((*p1) == '/')
				ps = p1 + 1;
			p1++;
		}
		if (pa < ps)
			pa = p1;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = pp + 1;
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n", pathp,
				(char *)pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = of_get_property(np, "name", NULL);
		np->type = of_get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk(KERN_INFO "Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}

/**
 * unflatten_device_tree - create the tree of struct device_node
 *
 * Unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree).
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;

	pr_debug(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	size = (size | 3) + 1;

	pr_debug(" size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	mem = (unsigned long) __va(mem);

	((u32 *)mem)[size / 4] = 0xdeadbeef;

	pr_debug(" unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n",
			*((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
			((u32 *)mem)[size / 4]);
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	pr_debug(" <- unflatten_device_tree()\n");
}
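
/*
 * Illustrative sketch (not in the original source): once the tree has been
 * unflattened, the usual refcounted OF accessors work.  The node path and
 * property name below are only examples.
 *
 *	struct device_node *np = of_find_node_by_path("/memory@0");
 *	const u32 *reg;
 *
 *	if (np) {
 *		reg = of_get_property(np, "reg", NULL);
 *		if (reg)
 *			pr_debug("memory base cell: %x\n", reg[0]);
 *		of_node_put(np);	// balance the reference from the find
 *	}
 */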

#define early_init_dt_scan_drconf_memory(node) 0

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	static int logical_cpuid;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *intserv;
	int i, nthreads;
	int found = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "reg", NULL);
	nthreads = 1;

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (initial_boot_params && initial_boot_params->version >= 2) {
			if (intserv[i] ==
					initial_boot_params->boot_cpuid_phys) {
				found = 1;
				break;
			}
		} else {
			/*
			 * Check if it's the boot-cpu, set its hw index now,
			 * unfortunately this format did not support booting
			 * off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
				found = 1;
				break;
			}
		}
#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		logical_cpuid++;
#endif
	}

	if (found) {
		pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
			intserv[i]);
		boot_cpuid = logical_cpuid;
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	unsigned long l;
	u32 *prop;

	pr_debug("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
	if (prop) {
		initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));

		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
		if (prop) {
			initrd_end = (unsigned long)
					__va(of_read_ulong(prop, l/4));
			initrd_below_start_ok = 1;
		} else {
			initrd_start = 0;
		}
	}

	pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n",
		initrd_start, initrd_end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static int __init early_init_dt_scan_chosen(unsigned long node,
				const char *uname, int depth, void *data)
{
	unsigned long l;
	char *p;
#ifdef CONFIG_KEXEC
	u64 *lprop;	/* declaration added: used by the crashkernel lookups below */
#endif

	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
		(strcmp(uname, "chosen") != 0 &&
				strcmp(uname, "chosen@0") != 0))
		return 0;

#ifdef CONFIG_KEXEC
	lprop = (u64 *)of_get_flat_dt_prop(node,
				"linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = (u64 *)of_get_flat_dt_prop(node,
				"linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	early_init_dt_check_for_initrd(node);

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

#ifdef CONFIG_CMDLINE
	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", cmd_line);

	/* break now */
	return 1;
}

static int __init early_init_dt_scan_root(unsigned long node,
				const char *uname, int depth, void *data)
{
	u32 *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;

	*cellp = p + s;
	return of_read_number(p, s);
}

static int __init early_init_dt_scan_memory(unsigned long node,
				const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
	/* if (depth == 1 &&
		strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);
	*/

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
		uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		pr_debug(" - %llx , %llx\n", (unsigned long long)base,
			(unsigned long long)size);

		lmb_add(base, size);
	}
	return 0;
}
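
/*
 * Worked example (not in the original source): with the root defaults read by
 * early_init_dt_scan_root() (#address-cells = 2, #size-cells = 1), a memory
 * node carrying
 *
 *	reg = <0x0 0x80000000 0x10000000>;
 *
 * is consumed by dt_mem_next_cell() as base = 0x80000000 (two address cells
 * combined) and size = 0x10000000 (256MB), which is then handed to lmb_add().
 * The values are illustrative only.
 */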

#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
 *
 * Function to find the largest size we need to reserve
 * during the early boot process.
 *
 * It either looks for a boot parameter and returns that, OR
 * returns the larger of 256MB or 5% of DRAM, rounded down to a
 * multiple of 256MB.
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of value */
	tmp = lmb_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down to a multiple of 256MB */
	tmp = tmp & ~0x0FFFFFFFUL;

	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}
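
/*
 * Worked example (not in the original source, sizes illustrative): with 8GB of
 * DRAM, lmb_end_of_DRAM() / 20 is roughly 409MB; masking with ~0x0FFFFFFF
 * rounds that down to 256MB, and the larger of that and PHYP_DUMP_RMR_END is
 * returned.  A reserve size supplied via phyp_dump_info->reserve_bootvar
 * bypasses the calculation entirely.
 */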

/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in last
 * boot instance or if the hardware is supported and the
 * scratch area needs to be set up. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above RMR. Area freed by userland tools. */
		base = variable_reserve_size;
		size = lmb_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		lmb_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = lmb_end_of_DRAM() - size;
		lmb_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */

#ifdef CONFIG_EARLY_PRINTK
/* MS this is a Microblaze specific function */
static int __init early_init_dt_scan_serial(unsigned long node,
				const char *uname, int depth, void *data)
{
	unsigned long l;
	char *p;
	int *addr;

	pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);

	/* find all serial nodes */
	if (strncmp(uname, "serial", 6) != 0)
		return 0;

	early_init_dt_check_for_initrd(node);

	/* find compatible node with uartlite */
	p = of_get_flat_dt_prop(node, "compatible", &l);
	if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
			(strncmp(p, "xlnx,opb-uartlite", 17) != 0))
		return 0;

	addr = of_get_flat_dt_prop(node, "reg", &l);
	return *addr; /* return address */
}

/* this function is looking for early uartlite console - Microblaze specific */
int __init early_uartlite_console(void)
{
	return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
}
#endif

void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if a dump occurred during last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	lmb_analyze();

	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());

	pr_debug("Scanning CPUs ...\n");

	/* Retrieve CPU related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	pr_debug(" <- early_init_devtree()\n");
}

/**
 * Indicates whether the root node has a given value in its
 * compatible property.
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(machine_is_compatible);
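
/*
 * Illustrative sketch (not in the original source): platform code can gate
 * setup on the board's root "compatible" string.  The compatible value shown
 * is only an example.
 *
 *	if (machine_is_compatible("xlnx,microblaze"))
 *		pr_info("running on a MicroBlaze platform\n");
 */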

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	for (np = allnodes; np != NULL; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	of_node_get(np);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
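
/*
 * Illustrative sketch (not in the original source): given some struct
 * device_node *np, a phandle read out of one of its properties can be
 * resolved back to the node it names; "interrupt-parent" is a typical (but
 * here only illustrative) source of such a phandle.
 *
 *	const u32 *ph = of_get_property(np, "interrupt-parent", NULL);
 *	struct device_node *parent;
 *
 *	if (ph) {
 *		parent = of_find_node_by_phandle(*ph);
 *		if (parent) {
 *			// ... use parent ...
 *			of_node_put(parent);	// matches the get in the find
 *		}
 *	}
 */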

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration;
 *		of_node_put() will be called on it
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = prev ? prev->allnext : allnodes;
	for (; np != NULL; np = np->allnext)
		if (of_node_get(np))
			break;
	of_node_put(prev);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
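
/*
 * Illustrative sketch (not in the original source): of_find_all_nodes() hands
 * back a new reference on each call and drops the one passed in, so the whole
 * tree can be walked with a simple loop and no explicit of_node_put() for the
 * intermediate nodes.
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_all_nodes(np)) != NULL)
 *		pr_debug("node: %s\n", np->full_name);
 */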

/**
 * of_node_get - Increment refcount of a node
 * @node:	Node to inc refcount, NULL is supported to
 *		simplify writing of callers
 *
 * Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 * of_node_release - release a dynamically allocated node
 * @kref:	kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
			node->full_name);
		dump_stack();
		kref_init(&node->kref);
		return;
	}

	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 * of_node_put - Decrement refcount of a node
 * @node:	Node to dec refcount, NULL is supported to
 *		simplify writing of callers
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);
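
/*
 * Illustrative sketch (not in the original source): every of_node_get(), and
 * every reference returned by the find helpers above, must be balanced by an
 * of_node_put() once the caller is done with the node.  "some_node" is a
 * placeholder.
 *
 *	struct device_node *np = of_node_get(some_node);	// take a reference
 *
 *	if (np) {
 *		// ... safe to dereference np here ...
 *		of_node_put(np);				// release it
 *	}
 */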

/*
 * Plug a device node into the tree and global list.
 */
void of_attach_node(struct device_node *np)
{
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock_irqrestore(&devtree_lock, flags);
}

/*
 * "Unplug" a node from the device tree.  The caller must hold
 * a reference to the node.  The memory associated with the node
 * is not freed until its refcount goes to zero.
 */
void of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);

	parent = np->parent;
	if (!parent)
		goto out_unlock;

	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	of_node_set_flag(np, OF_DETACHED);

out_unlock:
	write_unlock_irqrestore(&devtree_lock, flags);
}

/*
 * Add a property to a node.
 */
int prom_add_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;

	prop->next = NULL;
	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			write_unlock_irqrestore(&devtree_lock, flags);
			return -1;
		}
		next = &(*next)->next;
	}
	*next = prop;
	write_unlock_irqrestore(&devtree_lock, flags);

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_add_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
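
/*
 * Illustrative sketch (not in the original source): callers supply a fully
 * populated struct property; prom_add_property() refuses duplicates by name
 * and returns -1 in that case.  The property name and value here are made up.
 *
 *	static u32 example_value = 1;
 *	static struct property example_prop = {
 *		.name	= "linux,example-flag",
 *		.length	= 4,
 *		.value	= &example_value,
 *	};
 *
 *	if (prom_add_property(np, &example_prop) != 0)
 *		pr_debug("property already present\n");
 */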

/*
 * Remove a property from a node.  Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property.  Instead we just move the property
 * to the "dead properties" list, so it won't be found any more.
 */
int prom_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;

	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == prop) {
			/* found the node */
			*next = prop->next;
			prop->next = np->deadprops;
			np->deadprops = prop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to remove the proc node as well */
	if (np->pde)
		proc_device_tree_remove_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}

/*
 * Update a property in a node.  Note that we don't actually
 * remove it, since we have given out who-knows-how-many pointers
 * to the data using get-property.  Instead we just move the property
 * to the "dead properties" list, and add the new property to the
 * property list.
 */
int prom_update_property(struct device_node *np,
			 struct property *newprop,
			 struct property *oldprop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;

	write_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == oldprop) {
			/* found the node */
			newprop->next = oldprop->next;
			*next = newprop;
			oldprop->next = np->deadprops;
			np->deadprops = oldprop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	write_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_update_prop(np->pde, newprop, oldprop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}

#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
	struct dentry *d;

	flat_dt_blob.data = initial_boot_params;
	flat_dt_blob.size = initial_boot_params->totalsize;

	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
				of_debugfs_root, &flat_dt_blob);
	if (!d)
		return 1;

	return 0;
}
device_initcall(export_flat_device_tree);
#endif