  1. /*
  2. * Procedures for creating, accessing and interpreting the device tree.
  3. *
  4. * Paul Mackerras August 1996.
  5. * Copyright (C) 1996-2005 Paul Mackerras.
  6. *
  7. * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
  8. * {engebret|bergner}@us.ibm.com
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version
  13. * 2 of the License, or (at your option) any later version.
  14. */
  15. #include <stdarg.h>
  16. #include <linux/kernel.h>
  17. #include <linux/string.h>
  18. #include <linux/init.h>
  19. #include <linux/threads.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/types.h>
  22. #include <linux/pci.h>
  23. #include <linux/stringify.h>
  24. #include <linux/delay.h>
  25. #include <linux/initrd.h>
  26. #include <linux/bitops.h>
  27. #include <linux/module.h>
  28. #include <linux/kexec.h>
  29. #include <linux/debugfs.h>
  30. #include <linux/irq.h>
  31. #include <linux/lmb.h>
  32. #include <asm/prom.h>
  33. #include <asm/page.h>
  34. #include <asm/processor.h>
  35. #include <asm/irq.h>
  36. #include <linux/io.h>
  37. #include <asm/system.h>
  38. #include <asm/mmu.h>
  39. #include <asm/pgtable.h>
  40. #include <asm/sections.h>
  41. #include <asm/pci-bridge.h>
  42. typedef u32 cell_t;
  43. /* export that to outside world */
  44. struct device_node *of_chosen;
  45. #define early_init_dt_scan_drconf_memory(node) 0
  46. static int __init early_init_dt_scan_cpus(unsigned long node,
  47. const char *uname, int depth,
  48. void *data)
  49. {
  50. static int logical_cpuid;
  51. char *type = of_get_flat_dt_prop(node, "device_type", NULL);
  52. const u32 *intserv;
  53. int i, nthreads;
  54. int found = 0;
  55. /* We are scanning "cpu" nodes only */
  56. if (type == NULL || strcmp(type, "cpu") != 0)
  57. return 0;
  58. /* Get physical cpuid */
  59. intserv = of_get_flat_dt_prop(node, "reg", NULL);
  60. nthreads = 1;
  61. /*
  62. * Now see if any of these threads match our boot cpu.
  63. * NOTE: This must match the parsing done in smp_setup_cpu_maps.
  64. */
  65. for (i = 0; i < nthreads; i++) {
  66. /*
  67. * version 2 of the kexec param format adds the phys cpuid of
  68. * booted proc.
  69. */
  70. if (initial_boot_params && initial_boot_params->version >= 2) {
  71. if (intserv[i] ==
  72. initial_boot_params->boot_cpuid_phys) {
  73. found = 1;
  74. break;
  75. }
  76. } else {
  77. /*
  78. * Check if it's the boot-cpu, set it's hw index now,
  79. * unfortunately this format did not support booting
  80. * off secondary threads.
  81. */
  82. if (of_get_flat_dt_prop(node,
  83. "linux,boot-cpu", NULL) != NULL) {
  84. found = 1;
  85. break;
  86. }
  87. }
  88. #ifdef CONFIG_SMP
  89. /* logical cpu id is always 0 on UP kernels */
  90. logical_cpuid++;
  91. #endif
  92. }
  93. if (found) {
  94. pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
  95. intserv[i]);
  96. boot_cpuid = logical_cpuid;
  97. }
  98. return 0;
  99. }
  100. static int __init early_init_dt_scan_chosen(unsigned long node,
  101. const char *uname, int depth, void *data)
  102. {
  103. unsigned long l;
  104. char *p;
  105. pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
  106. if (depth != 1 ||
  107. (strcmp(uname, "chosen") != 0 &&
  108. strcmp(uname, "chosen@0") != 0))
  109. return 0;
  110. #ifdef CONFIG_KEXEC
  111. lprop = (u64 *)of_get_flat_dt_prop(node,
  112. "linux,crashkernel-base", NULL);
  113. if (lprop)
  114. crashk_res.start = *lprop;
  115. lprop = (u64 *)of_get_flat_dt_prop(node,
  116. "linux,crashkernel-size", NULL);
  117. if (lprop)
  118. crashk_res.end = crashk_res.start + *lprop - 1;
  119. #endif
  120. early_init_dt_check_for_initrd(node);
  121. /* Retreive command line */
  122. p = of_get_flat_dt_prop(node, "bootargs", &l);
  123. if (p != NULL && l > 0)
  124. strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
  125. #ifdef CONFIG_CMDLINE
  126. #ifndef CONFIG_CMDLINE_FORCE
  127. if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
  128. #endif
  129. strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
  130. #endif /* CONFIG_CMDLINE */
  131. pr_debug("Command line is: %s\n", cmd_line);
  132. /* break now */
  133. return 1;
  134. }
  135. static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
  136. {
  137. cell_t *p = *cellp;
  138. *cellp = p + s;
  139. return of_read_number(p, s);
  140. }
/*
 * Flat-device-tree scan callback: register memory regions with LMB.
 *
 * Accepts nodes with device_type == "memory", or (for boards that
 * omit device_type, e.g. longtrail) the depth-1 node "memory@0".
 * Prefers the "linux,usable-memory" property over "reg".  Each
 * (base, size) pair is fed to lmb_add().  Always returns 0 so the
 * scan visits every memory node.
 */
static int __init early_init_dt_scan_memory(unsigned long node,
	const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
	/* if (depth == 1 &&
		strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);
	*/

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	/* "linux,usable-memory" (if present) overrides "reg". */
	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	/* l is the property length in bytes; endp is one past the data. */
	endp = reg + (l / sizeof(cell_t));

	pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
		uname, l, reg[0], reg[1], reg[2], reg[3]);

	/* Consume (addr-cells + size-cells) cells per region. */
	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		/* dt_mem_next_cell() advances reg past each value. */
		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		/* Skip empty regions; reg was already advanced above. */
		if (size == 0)
			continue;
		pr_debug(" - %llx , %llx\n", (unsigned long long)base,
			(unsigned long long)size);

		lmb_add(base, size);
	}
	return 0;
}
  182. #ifdef CONFIG_PHYP_DUMP
  183. /**
  184. * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
  185. *
  186. * Function to find the largest size we need to reserve
  187. * during early boot process.
  188. *
  189. * It either looks for boot param and returns that OR
  190. * returns larger of 256 or 5% rounded down to multiples of 256MB.
  191. *
  192. */
  193. static inline unsigned long phyp_dump_calculate_reserve_size(void)
  194. {
  195. unsigned long tmp;
  196. if (phyp_dump_info->reserve_bootvar)
  197. return phyp_dump_info->reserve_bootvar;
  198. /* divide by 20 to get 5% of value */
  199. tmp = lmb_end_of_DRAM();
  200. do_div(tmp, 20);
  201. /* round it down in multiples of 256 */
  202. tmp = tmp & ~0x0FFFFFFFUL;
  203. return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
  204. }
  205. /**
  206. * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory
  207. *
  208. * This routine may reserve memory regions in the kernel only
  209. * if the system is supported and a dump was taken in last
  210. * boot instance or if the hardware is supported and the
  211. * scratch area needs to be setup. In other instances it returns
  212. * without reserving anything. The memory in case of dump being
  213. * active is freed when the dump is collected (by userland tools).
  214. */
  215. static void __init phyp_dump_reserve_mem(void)
  216. {
  217. unsigned long base, size;
  218. unsigned long variable_reserve_size;
  219. if (!phyp_dump_info->phyp_dump_configured) {
  220. printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
  221. return;
  222. }
  223. if (!phyp_dump_info->phyp_dump_at_boot) {
  224. printk(KERN_INFO "Phyp-dump disabled at boot time\n");
  225. return;
  226. }
  227. variable_reserve_size = phyp_dump_calculate_reserve_size();
  228. if (phyp_dump_info->phyp_dump_is_active) {
  229. /* Reserve *everything* above RMR.Area freed by userland tools*/
  230. base = variable_reserve_size;
  231. size = lmb_end_of_DRAM() - base;
  232. /* XXX crashed_ram_end is wrong, since it may be beyond
  233. * the memory_limit, it will need to be adjusted. */
  234. lmb_reserve(base, size);
  235. phyp_dump_info->init_reserve_start = base;
  236. phyp_dump_info->init_reserve_size = size;
  237. } else {
  238. size = phyp_dump_info->cpu_state_size +
  239. phyp_dump_info->hpte_region_size +
  240. variable_reserve_size;
  241. base = lmb_end_of_DRAM() - size;
  242. lmb_reserve(base, size);
  243. phyp_dump_info->init_reserve_start = base;
  244. phyp_dump_info->init_reserve_size = size;
  245. }
  246. }
  247. #else
/* No-op stub when CONFIG_PHYP_DUMP is not enabled. */
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */
  250. #ifdef CONFIG_EARLY_PRINTK
/* MS: this is a Microblaze-specific function */
  252. static int __init early_init_dt_scan_serial(unsigned long node,
  253. const char *uname, int depth, void *data)
  254. {
  255. unsigned long l;
  256. char *p;
  257. int *addr;
  258. pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
  259. /* find all serial nodes */
  260. if (strncmp(uname, "serial", 6) != 0)
  261. return 0;
  262. early_init_dt_check_for_initrd(node);
  263. /* find compatible node with uartlite */
  264. p = of_get_flat_dt_prop(node, "compatible", &l);
  265. if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
  266. (strncmp(p, "xlnx,opb-uartlite", 17) != 0))
  267. return 0;
  268. addr = of_get_flat_dt_prop(node, "reg", &l);
  269. return *addr; /* return address */
  270. }
/* this function is looking for early uartlite console - Microblaze specific */
/*
 * Scan the flat device tree for a uartlite serial node; returns the
 * device base address found by early_init_dt_scan_serial(), or 0.
 */
int __init early_uartlite_console(void)
{
	return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
}
  276. #endif
/*
 * Early entry point for flat-device-tree processing.
 *
 * @params: physical/virtual pointer to the flattened device tree blob.
 *
 * Runs before the normal device model exists.  Stashes the blob
 * pointer, then performs the scan passes in a fixed order: phyp dump
 * state (optional), /chosen (command line, initrd, crashkernel),
 * root cell sizes + memory nodes (building the LMB map), early
 * parameter parsing, and finally cpu nodes (boot cpu id).
 */
void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PHYP_DUMP
	/* scan tree to see if dump occurred during last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	/* Root scan must run first: it sets dt_root_addr_cells/size_cells
	 * that the memory scan relies on. */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	lmb_analyze();

	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());

	pr_debug("Scanning CPUs ...\n");

	/* Retrieve CPU related informations from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	pr_debug(" <- early_init_devtree()\n");
}
  307. /**
  308. * Indicates whether the root node has a given value in its
  309. * compatible property.
  310. */
  311. int machine_is_compatible(const char *compat)
  312. {
  313. struct device_node *root;
  314. int rc = 0;
  315. root = of_find_node_by_path("/");
  316. if (root) {
  317. rc = of_device_is_compatible(root, compat);
  318. of_node_put(root);
  319. }
  320. return rc;
  321. }
  322. EXPORT_SYMBOL(machine_is_compatible);
  323. /*******
  324. *
  325. * New implementation of the OF "find" APIs, return a refcounted
  326. * object, call of_node_put() when done. The device tree and list
  327. * are protected by a rw_lock.
  328. *
  329. * Note that property management will need some locking as well,
  330. * this isn't dealt with yet.
  331. *
  332. *******/
  333. /**
  334. * of_find_node_by_phandle - Find a node given a phandle
  335. * @handle: phandle of the node to find
  336. *
  337. * Returns a node pointer with refcount incremented, use
  338. * of_node_put() on it when done.
  339. */
  340. struct device_node *of_find_node_by_phandle(phandle handle)
  341. {
  342. struct device_node *np;
  343. read_lock(&devtree_lock);
  344. for (np = allnodes; np != NULL; np = np->allnext)
  345. if (np->linux_phandle == handle)
  346. break;
  347. of_node_get(np);
  348. read_unlock(&devtree_lock);
  349. return np;
  350. }
  351. EXPORT_SYMBOL(of_find_node_by_phandle);
  352. /**
  353. * of_node_get - Increment refcount of a node
  354. * @node: Node to inc refcount, NULL is supported to
  355. * simplify writing of callers
  356. *
  357. * Returns node.
  358. */
  359. struct device_node *of_node_get(struct device_node *node)
  360. {
  361. if (node)
  362. kref_get(&node->kref);
  363. return node;
  364. }
  365. EXPORT_SYMBOL(of_node_get);
/* Recover the device_node embedding the given kref member. */
static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}
/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.  Frees the node's properties (both the live
 * list and any properties parked on deadprops), then the node's
 * name, data, and the node itself — but only for OF_DYNAMIC nodes
 * that were properly detached first.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
			node->full_name);
		dump_stack();
		/* Re-arm the refcount so the node stays alive despite the
		 * erroneous put. */
		kref_init(&node->kref);
		return;
	}

	/* Statically allocated (firmware) nodes are never freed. */
	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	/* Free the live property list first; when it is exhausted, switch
	 * once to the deadprops list (properties removed before release). */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
  406. /**
  407. * of_node_put - Decrement refcount of a node
  408. * @node: Node to dec refcount, NULL is supported to
  409. * simplify writing of callers
  410. *
  411. */
  412. void of_node_put(struct device_node *node)
  413. {
  414. if (node)
  415. kref_put(&node->kref, of_node_release);
  416. }
  417. EXPORT_SYMBOL(of_node_put);
/*
 * Plug a device node into the tree and global list.
 *
 * Links @np in as the first child of its (pre-set) parent and at the
 * head of the global allnodes list, all under the devtree write lock.
 * NOTE(review): assumes np->parent is already valid — confirm callers.
 */
void of_attach_node(struct device_node *np)
{
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);
	/* Capture the old heads before overwriting them below. */
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock_irqrestore(&devtree_lock, flags);
}
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 *
 * Removes @np from both the global allnodes list and its parent's
 * sibling chain, then marks it OF_DETACHED so of_node_release() will
 * accept it.  A node with no parent (e.g. the root) is left alone.
 */
void of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);

	parent = np->parent;
	if (!parent)
		goto out_unlock;

	/* Unlink from the global allnodes list. */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		/* Walk to the predecessor of np in the allnext chain. */
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* Unlink from the parent's child/sibling chain. */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	of_node_set_flag(np, OF_DETACHED);

out_unlock:
	write_unlock_irqrestore(&devtree_lock, flags);
}
  468. #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
  469. static struct debugfs_blob_wrapper flat_dt_blob;
  470. static int __init export_flat_device_tree(void)
  471. {
  472. struct dentry *d;
  473. flat_dt_blob.data = initial_boot_params;
  474. flat_dt_blob.size = initial_boot_params->totalsize;
  475. d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
  476. of_debugfs_root, &flat_dt_blob);
  477. if (!d)
  478. return 1;
  479. return 0;
  480. }
  481. device_initcall(export_flat_device_tree);
  482. #endif