prom.c

/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/phyp_dump.h>
#include <asm/kexec.h>
#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif

static phys_addr_t first_memblock_size;

static int __init early_parse_mem(char *p)
{
        if (!p)
                return 1;

        memory_limit = PAGE_ALIGN(memparse(p, &p));
        DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);

        return 0;
}
early_param("mem", early_parse_mem);

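/*
 * Illustrative example (not from the original source): booting with
 * "mem=512M" makes memparse() return 0x20000000, so memory_limit is
 * capped at 512MB.  The usual K/M/G suffixes are accepted.
 */
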
/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump. If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
        unsigned long start, size;
        void *p;

        DBG("-> move_device_tree\n");

        start = __pa(initial_boot_params);
        size = be32_to_cpu(initial_boot_params->totalsize);

        if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
                        overlaps_crashkernel(start, size)) {
                p = __va(memblock_alloc(size, PAGE_SIZE));
                memcpy(p, initial_boot_params, size);
                initial_boot_params = (struct boot_param_header *)p;
                DBG("Moved device tree to 0x%p\n", p);
        }

        DBG("<- move_device_tree\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  The first header
 * byte specifies the number of bytes following the header.
 * The second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation: the table below gives the byte and bit offset of
 * each feature we are interested in; scan_features() then sets or
 * clears the corresponding CPU/MMU/user feature bits according to
 * whether the bit is present in the property.  Note that the bit
 * numbers are big-endian to match the definition in PAPR.
 */
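/*
 * A worked example (values assumed, not from the original source): given
 * property bytes { 0x06, 0x00, 0xf6, ... }, the descriptor has length 6,
 * type 0, and its first attribute byte is 0xf6.  For the FPU entry below
 * (pabyte = 0, pabit = 1), scan_features() computes
 * (0xf6 >> (7 - 1)) & 1 = 1, so PPC_FEATURE_HAS_FPU is kept.
 */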
static struct ibm_pa_feature {
        unsigned long   cpu_features;   /* CPU_FTR_xxx bit */
        unsigned long   mmu_features;   /* MMU_FTR_xxx bit */
        unsigned int    cpu_user_ftrs;  /* PPC_FEATURE_xxx bit */
        unsigned char   pabyte;         /* byte number in ibm,pa-features */
        unsigned char   pabit;          /* bit number (big-endian) */
        unsigned char   invert;         /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
        {0, 0, PPC_FEATURE_HAS_MMU,     0, 0, 0},
        {0, 0, PPC_FEATURE_HAS_FPU,     0, 1, 0},
        {0, MMU_FTR_SLB, 0,             0, 2, 0},
        {CPU_FTR_CTRL, 0, 0,            0, 3, 0},
        {CPU_FTR_NOEXECUTE, 0, 0,       0, 6, 0},
        {CPU_FTR_NODSISRALIGN, 0, 0,    1, 1, 1},
        {0, MMU_FTR_CI_LARGE_PAGE, 0,   1, 2, 0},
        {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};

static void __init scan_features(unsigned long node, unsigned char *ftrs,
                                 unsigned long tablelen,
                                 struct ibm_pa_feature *fp,
                                 unsigned long ft_size)
{
        unsigned long i, len, bit;

        /* find descriptor with type == 0 */
        for (;;) {
                if (tablelen < 3)
                        return;
                len = 2 + ftrs[0];
                if (tablelen < len)
                        return;         /* descriptor 0 not found */
                if (ftrs[1] == 0)
                        break;
                tablelen -= len;
                ftrs += len;
        }

        /* loop over bits we know about */
        for (i = 0; i < ft_size; ++i, ++fp) {
                if (fp->pabyte >= ftrs[0])
                        continue;
                bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
                if (bit ^ fp->invert) {
                        cur_cpu_spec->cpu_features |= fp->cpu_features;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
                        cur_cpu_spec->mmu_features |= fp->mmu_features;
                } else {
                        cur_cpu_spec->cpu_features &= ~fp->cpu_features;
                        cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
                        cur_cpu_spec->mmu_features &= ~fp->mmu_features;
                }
        }
}

static void __init check_cpu_pa_features(unsigned long node)
{
        unsigned char *pa_ftrs;
        unsigned long tablelen;

        pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
        if (pa_ftrs == NULL)
                return;

        scan_features(node, pa_ftrs, tablelen,
                      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
        u32 *slb_size_ptr;

        slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
        if (slb_size_ptr != NULL) {
                mmu_slb_size = *slb_size_ptr;
                return;
        }
        slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
        if (slb_size_ptr != NULL) {
                mmu_slb_size = *slb_size_ptr;
        }
}
#else
#define check_cpu_slb_size(node) do { } while (0)
#endif

static struct feature_property {
        const char *name;
        u32 min_value;
        unsigned long cpu_feature;
        unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
        {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
        {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
        {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
        {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
        {"ibm,purr", 1, CPU_FTR_PURR, 0},
        {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
{
        unsigned int pvr;
        char *model = of_get_flat_dt_prop(node, "model", NULL);

        /*
         * Since 440GR(x)/440EP(x) processors have the same pvr,
         * we check the node path and set bit 28 in the cur_cpu_spec
         * pvr for EP(x) processor version. This bit is always 0 in
         * the "real" pvr. Then we call identify_cpu again with
         * the new logical pvr to enable FPU support.
         */
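        /* In IBM MSB-0 numbering, bit 28 of a 32-bit value is
         * 1 << (31 - 28) = 0x8, hence the mask below. */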
        if (model && strstr(model, "440EP")) {
                pvr = cur_cpu_spec->pvr_value | 0x8;
                identify_cpu(0, pvr);
                DBG("Using logical pvr %x for %s\n", pvr, model);
        }
}
#else
#define identical_pvr_fixup(node) do { } while (0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
        unsigned long i;
        struct feature_property *fp = feature_properties;
        const u32 *prop;

        for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
                prop = of_get_flat_dt_prop(node, fp->name, NULL);
                if (prop && *prop >= fp->min_value) {
                        cur_cpu_spec->cpu_features |= fp->cpu_feature;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
                }
        }
}

static int __init early_init_dt_scan_cpus(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        const u32 *prop;
        const u32 *intserv;
        int i, nthreads;
        unsigned long len;
        int found = -1;
        int found_thread = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        /* Get physical cpuid */
        intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
        if (intserv) {
                nthreads = len / sizeof(int);
        } else {
                intserv = of_get_flat_dt_prop(node, "reg", NULL);
                nthreads = 1;
        }
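
        /*
         * Illustrative example (assumed values): a 2-thread core might
         * carry ibm,ppc-interrupt-server#s = <0x20 0x21>, giving
         * nthreads = 2, with one physical id per hardware thread.
         */
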
        /*
         * Now see if any of these threads match our boot cpu.
         * NOTE: This must match the parsing done in smp_setup_cpu_maps.
         */
        for (i = 0; i < nthreads; i++) {
                /*
                 * version 2 of the kexec param format adds the phys cpuid of
                 * booted proc.
                 */
                if (initial_boot_params->version >= 2) {
                        if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
                                found = boot_cpu_count;
                                found_thread = i;
                        }
                } else {
                        /*
                         * Check if it's the boot-cpu and set its hw index
                         * now; unfortunately this format did not support
                         * booting off secondary threads.
                         */
                        if (of_get_flat_dt_prop(node,
                                        "linux,boot-cpu", NULL) != NULL)
                                found = boot_cpu_count;
                }
#ifdef CONFIG_SMP
                /* logical cpu id is always 0 on UP kernels */
                boot_cpu_count++;
#endif
        }

        if (found >= 0) {
                DBG("boot cpu: logical %d physical %d\n", found,
                    intserv[found_thread]);
                boot_cpuid = found;
                set_hard_smp_processor_id(found, intserv[found_thread]);

                /*
                 * PAPR defines "logical" PVR values for cpus that
                 * meet various levels of the architecture:
                 * 0x0f000001   Architecture version 2.04
                 * 0x0f000002   Architecture version 2.05
                 * If the cpu-version property in the cpu node contains
                 * such a value, we call identify_cpu again with the
                 * logical PVR value in order to use the cpu feature
                 * bits appropriate for the architecture level.
                 *
                 * A POWER6 partition in "POWER6 architected" mode
                 * uses the 0x0f000002 PVR value; in POWER5+ mode
                 * it uses 0x0f000001.
                 */
                prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
                if (prop && (*prop & 0xff000000) == 0x0f000000)
                        identify_cpu(0, *prop);

                identical_pvr_fixup(node);
        }

        check_cpu_feature_properties(node);
        check_cpu_pa_features(node);
        check_cpu_slb_size(node);

#ifdef CONFIG_PPC_PSERIES
        if (nthreads > 1)
                cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
        else
                cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif

        return 0;
}

int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
                                         int depth, void *data)
{
        unsigned long *lprop;

        /* Use common scan routine to determine if this is the chosen node */
        if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
                return 0;

#ifdef CONFIG_PPC64
        /* check if iommu is forced on or off */
        if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
                iommu_is_off = 1;
        if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
                iommu_force_on = 1;
#endif

        /* mem=x on the command line is the preferred mechanism */
        lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
        if (lprop)
                memory_limit = *lprop;

#ifdef CONFIG_PPC64
        lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
        if (lprop)
                tce_alloc_start = *lprop;
        lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
        if (lprop)
                tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC
        lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
        if (lprop)
                crashk_res.start = *lprop;

        lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
        if (lprop)
                crashk_res.end = crashk_res.start + *lprop - 1;
#endif

        /* break now */
        return 1;
}

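/*
 * For illustration only, a /chosen node carrying the properties parsed
 * above might look like this in device-tree source (values assumed,
 * cell counts depend on the platform):
 *
 *	chosen {
 *		bootargs = "root=/dev/sda2 console=hvc0";
 *		linux,memory-limit = <0x0 0x40000000>;	// cap at 1GB
 *		linux,crashkernel-base = <0x0 0x02000000>;
 *		linux,crashkernel-size = <0x0 0x08000000>;
 *	};
 */
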
#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
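/*
 * Sketch of the layout walked below, as implied by the parsing code:
 * after an initial cell holding the entry count, each entry is
 *
 *	base address		(dt_root_addr_cells cells)
 *	DRC index		(1 cell)
 *	reserved/pad		(1 cell)
 *	assoc. list index	(1 cell)
 *	flags			(1 cell)
 *
 * which matches the "dm += 4" skip and the
 * l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32) size check.
 */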
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
        __be32 *dm, *ls, *usm;
        unsigned long l, n, flags;
        u64 base, size, memblock_size;
        unsigned int is_kexec_kdump = 0, rngs;

        ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
        if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
                return 0;
        memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);

        dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
        if (dm == NULL || l < sizeof(__be32))
                return 0;

        n = *dm++;      /* number of entries */
        if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
                return 0;

        /* check if this is a kexec/kdump kernel. */
        usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
                                  &l);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                base = dt_mem_next_cell(dt_root_addr_cells, &dm);
                flags = dm[3];
                /* skip DRC index, pad, assoc. list index, flags */
                dm += 4;
                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((flags & 0x80) || !(flags & 0x8))
                        continue;
                size = memblock_size;
                rngs = 1;
                if (is_kexec_kdump) {
                        /*
                         * For each memblock in ibm,dynamic-memory, the
                         * corresponding entry in the
                         * linux,drconf-usable-memory property holds a
                         * counter followed by that many (base, size)
                         * pairs.  Read the counter first.
                         */
                        rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
                        if (!rngs) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = dt_mem_next_cell(dt_root_addr_cells,
                                                        &usm);
                                size = dt_mem_next_cell(dt_root_size_cells,
                                                        &usm);
                        }
                        if (iommu_is_off) {
                                if (base >= 0x80000000ul)
                                        continue;
                                if ((base + size) > 0x80000000ul)
                                        size = 0x80000000ul - base;
                        }
                        memblock_add(base, size);
                } while (--rngs);
        }
        memblock_dump_all();
        return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node) 0
#endif /* CONFIG_PPC_PSERIES */

static int __init early_init_dt_scan_memory_ppc(unsigned long node,
                                                const char *uname,
                                                int depth, void *data)
{
        if (depth == 1 &&
            strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
                return early_init_dt_scan_drconf_memory(node);

        return early_init_dt_scan_memory(node, uname, depth, data);
}

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
        if (iommu_is_off) {
                if (base >= 0x80000000ul)
                        return;
                if ((base + size) > 0x80000000ul)
                        size = 0x80000000ul - base;
        }
#endif

        /* Keep track of the beginning of memory -and- the size of
         * the very first block in the device-tree as it represents
         * the RMA on ppc64 server
         */
        if (base < memstart_addr) {
                memstart_addr = base;
                first_memblock_size = size;
        }

        /* Add the chunk to the MEMBLOCK list */
        memblock_add(base, size);
}

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
        return __va(memblock_alloc(size, align));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init early_init_dt_setup_initrd_arch(unsigned long start,
                                            unsigned long end)
{
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
        initrd_below_start_ok = 1;
}
#endif

static void __init early_reserve_mem(void)
{
        u64 base, size;
        u64 *reserve_map;
        unsigned long self_base;
        unsigned long self_size;

        reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
                              initial_boot_params->off_mem_rsvmap);

        /* before we do anything, lets reserve the dt blob */
        self_base = __pa((unsigned long)initial_boot_params);
        self_size = initial_boot_params->totalsize;
        memblock_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
        /* then reserve the initrd, if any */
        if (initrd_start && (initrd_end > initrd_start))
                memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
        /*
         * Handle the case where we might be booting from an old kexec
         * image that set up the mem_rsvmap as pairs of 32-bit values
         */
        if (*reserve_map > 0xffffffffull) {
                u32 base_32, size_32;
                u32 *reserve_map_32 = (u32 *)reserve_map;

                while (1) {
                        base_32 = *(reserve_map_32++);
                        size_32 = *(reserve_map_32++);
                        if (size_32 == 0)
                                break;
                        /* skip if the reservation is for the blob */
                        if (base_32 == self_base && size_32 == self_size)
                                continue;
                        DBG("reserving: %x -> %x\n", base_32, size_32);
                        memblock_reserve(base_32, size_32);
                }
                return;
        }
#endif
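        /*
         * Normal case: the reserve map is a zero-terminated array of
         * 64-bit (base, size) pairs, e.g. (illustrative values only)
         * { 0x0000000001000000, 0x0000000000400000, 0, 0 }.
         */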
        while (1) {
                base = *(reserve_map++);
                size = *(reserve_map++);
                if (size == 0)
                        break;
                DBG("reserving: %llx -> %llx\n", base, size);
                memblock_reserve(base, size);
        }
}

#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area, 5% of memory or boot arg
 *
 * Function to find the largest amount of memory we need to reserve
 * during the early boot process.
 *
 * It either looks for a boot parameter and returns that, OR returns
 * the larger of 256MB or 5% of total memory rounded down to a multiple
 * of 256MB.
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
        unsigned long tmp;

        if (phyp_dump_info->reserve_bootvar)
                return phyp_dump_info->reserve_bootvar;

        /* divide by 20 to get 5% of value */
        tmp = memblock_end_of_DRAM();
        do_div(tmp, 20);

        /* round it down to multiples of 256MB */
        tmp = tmp & ~0x0FFFFFFFUL;

        return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}
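
/*
 * Worked example (numbers assumed): on a 32GB partition with no
 * user-specified reserve size (reserve_bootvar == 0), 5% is
 * 0x66666666 bytes (~1.6GB); masking with ~0x0FFFFFFFUL rounds that
 * down to 0x60000000 (1.5GB), which is then compared against
 * PHYP_DUMP_RMR_END.
 */
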
/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in last
 * boot instance or if the hardware is supported and the
 * scratch area needs to be set up. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
        unsigned long base, size;
        unsigned long variable_reserve_size;

        if (!phyp_dump_info->phyp_dump_configured) {
                printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
                return;
        }

        if (!phyp_dump_info->phyp_dump_at_boot) {
                printk(KERN_INFO "Phyp-dump disabled at boot time\n");
                return;
        }

        variable_reserve_size = phyp_dump_calculate_reserve_size();

        if (phyp_dump_info->phyp_dump_is_active) {
                /* Reserve *everything* above RMR; the area is freed by
                 * userland tools. */
                base = variable_reserve_size;
                size = memblock_end_of_DRAM() - base;

                /* XXX crashed_ram_end is wrong, since it may be beyond
                 * the memory_limit, it will need to be adjusted */
                memblock_reserve(base, size);

                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
        } else {
                size = phyp_dump_info->cpu_state_size +
                       phyp_dump_info->hpte_region_size +
                       variable_reserve_size;
                base = memblock_end_of_DRAM() - size;
                memblock_reserve(base, size);
                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
        }
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */

void __init early_init_devtree(void *params)
{
        phys_addr_t limit;

        DBG(" -> early_init_devtree(%p)\n", params);

        /* Setup flat device-tree pointer */
        initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
        /* Some machines might need RTAS info for debugging, grab it now. */
        of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PHYP_DUMP
        /* scan tree to see if a dump occurred during last boot */
        of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

        /* Retrieve various pieces of information from the /chosen node
         * of the device-tree, including the platform type, initrd
         * location and size, TCE reserve, and more ...
         */
        of_scan_flat_dt(early_init_dt_scan_chosen_ppc, NULL);

        /* Scan memory nodes and rebuild MEMBLOCKs */
        memblock_init();
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
        setup_initial_memory_limit(memstart_addr, first_memblock_size);

        /* Save command line for /proc/cmdline and then parse parameters */
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
        parse_early_param();

        /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
        memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);

        /* If relocatable, reserve first 32k for interrupt vectors etc. */
        if (PHYSICAL_START > MEMORY_START)
                memblock_reserve(MEMORY_START, 0x8000);
        reserve_kdump_trampoline();
        reserve_crashkernel();
        early_reserve_mem();
        phyp_dump_reserve_mem();

        limit = memory_limit;
        if (!limit) {
                phys_addr_t memsize;

                /* Ensure that total memory size is page-aligned, because
                 * otherwise mark_bootmem() gets upset. */
                memblock_analyze();
                memsize = memblock_phys_mem_size();
                if ((memsize & PAGE_MASK) != memsize)
                        limit = memsize & PAGE_MASK;
        }
        memblock_enforce_memory_limit(limit);

        memblock_analyze();
        memblock_dump_all();

        DBG("Phys. mem: %llx\n", memblock_phys_mem_size());

        /* We may need to relocate the flat tree, do it now.
         * FIXME .. and the initrd too? */
        move_device_tree();

        allocate_pacas();

        DBG("Scanning CPUs ...\n");

        /* Retrieve CPU-related information from the flat tree
         * (altivec support, boot CPU ID, ...)
         */
        of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

        DBG(" <- early_init_devtree()\n");
}

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np: node of type "cpu" or "cache"
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.  Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(struct device_node *np)
{
        struct device_node *child;
        const phandle *handle;

        handle = of_get_property(np, "l2-cache", NULL);
        if (!handle)
                handle = of_get_property(np, "next-level-cache", NULL);

        if (handle)
                return of_find_node_by_phandle(*handle);

        /* OF on pmac has nodes instead of properties named "l2-cache"
         * beneath CPU nodes.
         */
        if (!strcmp(np->type, "cpu"))
                for_each_child_of_node(np, child)
                        if (!strcmp(child->type, "cache"))
                                return child;

        return NULL;
}
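
/*
 * Usage sketch (hypothetical caller, not from the original source):
 * walking every cache level above a cpu node could look like
 *
 *	struct device_node *cache = of_find_next_cache_node(cpu_np);
 *	while (cache) {
 *		struct device_node *next = of_find_next_cache_node(cache);
 *		// ... inspect "d-cache-size", "i-cache-size", etc. ...
 *		of_node_put(cache);
 *		cache = next;
 *	}
 */
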
#ifdef CONFIG_PPC_PSERIES
/*
 * Fix up the uninitialized fields in a new device node:
 * name, type and pci-specific fields
 */
static int of_finish_dynamic_node(struct device_node *node)
{
        struct device_node *parent = of_get_parent(node);
        int err = 0;
        const phandle *ibm_phandle;

        node->name = of_get_property(node, "name", NULL);
        node->type = of_get_property(node, "device_type", NULL);

        if (!node->name)
                node->name = "<NULL>";
        if (!node->type)
                node->type = "<NULL>";

        if (!parent) {
                err = -ENODEV;
                goto out;
        }

        /* We don't support that function on PowerMac, at least
         * not yet
         */
        if (machine_is(powermac)) {
                err = -ENODEV;
                goto out;       /* drop the parent reference on this path too */
        }

        /* fix up new node's phandle field */
        if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
                node->phandle = *ibm_phandle;

out:
        of_node_put(parent);
        return err;
}

static int prom_reconfig_notifier(struct notifier_block *nb,
                                  unsigned long action, void *node)
{
        int err;

        switch (action) {
        case PSERIES_RECONFIG_ADD:
                err = of_finish_dynamic_node(node);
                if (err < 0) {
                        printk(KERN_ERR "finish_node returned %d\n", err);
                        err = NOTIFY_BAD;
                }
                break;
        default:
                err = NOTIFY_DONE;
                break;
        }
        return err;
}

static struct notifier_block prom_reconfig_nb = {
        .notifier_call = prom_reconfig_notifier,
        .priority = 10,         /* This one needs to run first */
};

static int __init prom_reconfig_setup(void)
{
        return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
#endif

/* Find the device node for a given logical cpu number, also returns the cpu
 * local thread number (index in ibm,ppc-interrupt-server#s) if relevant and
 * asked for (non-NULL "thread")
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
        int hardid;
        struct device_node *np;

        hardid = get_hard_smp_processor_id(cpu);

        for_each_node_by_type(np, "cpu") {
                const u32 *intserv;
                unsigned int plen, t;

                /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
                 * fallback to "reg" property and assume no threads
                 */
                intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
                                          &plen);
                if (intserv == NULL) {
                        const u32 *reg = of_get_property(np, "reg", NULL);
                        if (reg == NULL)
                                continue;
                        if (*reg == hardid) {
                                if (thread)
                                        *thread = 0;
                                return np;
                        }
                } else {
                        plen /= sizeof(u32);
                        for (t = 0; t < plen; t++) {
                                if (hardid == intserv[t]) {
                                        if (thread)
                                                *thread = t;
                                        return np;
                                }
                        }
                }
        }
        return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
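
/*
 * Usage sketch (hypothetical caller, not from the original source):
 * mapping the boot cpu back to its device-tree node and hardware thread.
 *
 *	unsigned int thread;
 *	struct device_node *np = of_get_cpu_node(boot_cpuid, &thread);
 *	if (np) {
 *		// np->full_name identifies the cpu, thread its strand
 *		of_node_put(np);
 *	}
 */
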
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
        struct dentry *d;

        flat_dt_blob.data = initial_boot_params;
        flat_dt_blob.size = initial_boot_params->totalsize;

        d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
                                powerpc_debugfs_root, &flat_dt_blob);
        if (!d)
                return 1;

        return 0;
}
__initcall(export_flat_device_tree);
#endif