  1. /*
  2. * Procedures for creating, accessing and interpreting the device tree.
  3. *
  4. * Paul Mackerras August 1996.
  5. * Copyright (C) 1996-2005 Paul Mackerras.
  6. *
  7. * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
  8. * {engebret|bergner}@us.ibm.com
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version
  13. * 2 of the License, or (at your option) any later version.
  14. */
  15. #undef DEBUG
  16. #include <stdarg.h>
  17. #include <linux/config.h>
  18. #include <linux/kernel.h>
  19. #include <linux/string.h>
  20. #include <linux/init.h>
  21. #include <linux/threads.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/types.h>
  24. #include <linux/pci.h>
  25. #include <linux/stringify.h>
  26. #include <linux/delay.h>
  27. #include <linux/initrd.h>
  28. #include <linux/bitops.h>
  29. #include <linux/module.h>
  30. #include <linux/kexec.h>
  31. #include <asm/prom.h>
  32. #include <asm/rtas.h>
  33. #include <asm/lmb.h>
  34. #include <asm/page.h>
  35. #include <asm/processor.h>
  36. #include <asm/irq.h>
  37. #include <asm/io.h>
  38. #include <asm/kdump.h>
  39. #include <asm/smp.h>
  40. #include <asm/system.h>
  41. #include <asm/mmu.h>
  42. #include <asm/pgtable.h>
  43. #include <asm/pci.h>
  44. #include <asm/iommu.h>
  45. #include <asm/btext.h>
  46. #include <asm/sections.h>
  47. #include <asm/machdep.h>
  48. #include <asm/pSeries_reconfig.h>
  49. #include <asm/pci-bridge.h>
  50. #ifdef DEBUG
  51. #define DBG(fmt...) printk(KERN_ERR fmt)
  52. #else
  53. #define DBG(fmt...)
  54. #endif
/* #address-cells / #size-cells of the root node, cached while scanning
 * the flattened tree (early boot only). */
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

#ifdef CONFIG_PPC64
/* iommu on/off state parsed from the command line / chosen node */
static int __initdata iommu_is_off;
int __initdata iommu_force_on;
/* TCE table range reserved by the firmware, if any */
unsigned long tce_alloc_start, tce_alloc_end;
#endif

typedef u32 cell_t;

#if 0
static struct boot_param_header *initial_boot_params __initdata;
#else
/* Header of the flattened device-tree blob passed in by the firmware */
struct boot_param_header *initial_boot_params;
#endif

/* Head of the flat list of every device_node, linked via ->allnext */
static struct device_node *allnodes = NULL;

/* use when traversing tree through the allnext, child, sibling,
 * or parent members of struct device_node.
 */
static DEFINE_RWLOCK(devtree_lock);

/* export that to outside world */
struct device_node *of_chosen;

/* Fallback interrupt controller for trees without phandles (BootX),
 * and the number of controllers found by scan_interrupt_controllers() */
struct device_node *dflt_interrupt_controller;
int num_interrupt_controllers;
  77. /*
  78. * Wrapper for allocating memory for various data that needs to be
  79. * attached to device nodes as they are processed at boot or when
  80. * added to the device tree later (e.g. DLPAR). At boot there is
  81. * already a region reserved so we just increment *mem_start by size;
  82. * otherwise we call kmalloc.
  83. */
  84. static void * prom_alloc(unsigned long size, unsigned long *mem_start)
  85. {
  86. unsigned long tmp;
  87. if (!mem_start)
  88. return kmalloc(size, GFP_KERNEL);
  89. tmp = *mem_start;
  90. *mem_start += size;
  91. return (void *)tmp;
  92. }
  93. /*
  94. * Find the device_node with a given phandle.
  95. */
  96. static struct device_node * find_phandle(phandle ph)
  97. {
  98. struct device_node *np;
  99. for (np = allnodes; np != 0; np = np->allnext)
  100. if (np->linux_phandle == ph)
  101. return np;
  102. return NULL;
  103. }
  104. /*
  105. * Find the interrupt parent of a node.
  106. */
  107. static struct device_node * __devinit intr_parent(struct device_node *p)
  108. {
  109. phandle *parp;
  110. parp = (phandle *) get_property(p, "interrupt-parent", NULL);
  111. if (parp == NULL)
  112. return p->parent;
  113. p = find_phandle(*parp);
  114. if (p != NULL)
  115. return p;
  116. /*
  117. * On a powermac booted with BootX, we don't get to know the
  118. * phandles for any nodes, so find_phandle will return NULL.
  119. * Fortunately these machines only have one interrupt controller
  120. * so there isn't in fact any ambiguity. -- paulus
  121. */
  122. if (num_interrupt_controllers == 1)
  123. p = dflt_interrupt_controller;
  124. return p;
  125. }
  126. /*
  127. * Find out the size of each entry of the interrupts property
  128. * for a node.
  129. */
  130. int __devinit prom_n_intr_cells(struct device_node *np)
  131. {
  132. struct device_node *p;
  133. unsigned int *icp;
  134. for (p = np; (p = intr_parent(p)) != NULL; ) {
  135. icp = (unsigned int *)
  136. get_property(p, "#interrupt-cells", NULL);
  137. if (icp != NULL)
  138. return *icp;
  139. if (get_property(p, "interrupt-controller", NULL) != NULL
  140. || get_property(p, "interrupt-map", NULL) != NULL) {
  141. printk("oops, node %s doesn't have #interrupt-cells\n",
  142. p->full_name);
  143. return 1;
  144. }
  145. }
  146. #ifdef DEBUG_IRQ
  147. printk("prom_n_intr_cells failed for %s\n", np->full_name);
  148. #endif
  149. return 1;
  150. }
  151. /*
  152. * Map an interrupt from a device up to the platform interrupt
  153. * descriptor.
  154. */
/*
 * Map an interrupt from a device up to the platform interrupt
 * descriptor.
 *
 * Walks from @np towards the root of the interrupt tree, translating
 * the raw specifier @ints (@nintrc cells) through each intermediate
 * node's "interrupt-map"/"interrupt-map-mask" until a node carrying an
 * "interrupt-controller" property is reached.  On success, *irq points
 * at the final specifier, *ictrler at the controller node, and the
 * number of cells in that specifier is returned.  Returns 0 on any
 * failure to decode the map.
 */
static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
				   struct device_node *np, unsigned int *ints,
				   int nintrc)
{
	struct device_node *p, *ipar;
	unsigned int *imap, *imask, *ip;
	int i, imaplen, match;
	int newintrc = 0, newaddrc = 0;
	unsigned int *reg;
	int naddrc;

	reg = (unsigned int *) get_property(np, "reg", NULL);
	naddrc = prom_n_addr_cells(np);
	p = intr_parent(np);
	while (p != NULL) {
		if (get_property(p, "interrupt-controller", NULL) != NULL)
			/* this node is an interrupt controller, stop here */
			break;
		imap = (unsigned int *)
			get_property(p, "interrupt-map", &imaplen);
		if (imap == NULL) {
			/* no map at this level, just keep walking up */
			p = intr_parent(p);
			continue;
		}
		imask = (unsigned int *)
			get_property(p, "interrupt-map-mask", NULL);
		if (imask == NULL) {
			printk("oops, %s has interrupt-map but no mask\n",
			       p->full_name);
			return 0;
		}
		imaplen /= sizeof(unsigned int);
		match = 0;
		ipar = NULL;
		/* each map entry is: child-addr child-intr phandle
		 * parent-addr parent-intr */
		while (imaplen > 0 && !match) {
			/* check the child-interrupt field */
			match = 1;
			for (i = 0; i < naddrc && match; ++i)
				match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
			for (; i < naddrc + nintrc && match; ++i)
				match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
			imap += naddrc + nintrc;
			imaplen -= naddrc + nintrc;
			/* grab the interrupt parent */
			ipar = find_phandle((phandle) *imap++);
			--imaplen;
			if (ipar == NULL && num_interrupt_controllers == 1)
				/* cope with BootX not giving us phandles */
				ipar = dflt_interrupt_controller;
			if (ipar == NULL) {
				printk("oops, no int parent %x in map of %s\n",
				       imap[-1], p->full_name);
				return 0;
			}
			/* find the parent's # addr and intr cells */
			ip = (unsigned int *)
				get_property(ipar, "#interrupt-cells", NULL);
			if (ip == NULL) {
				printk("oops, no #interrupt-cells on %s\n",
				       ipar->full_name);
				return 0;
			}
			newintrc = *ip;
			ip = (unsigned int *)
				get_property(ipar, "#address-cells", NULL);
			newaddrc = (ip == NULL)? 0: *ip;
			/* step over this entry's parent specifier */
			imap += newaddrc + newintrc;
			imaplen -= newaddrc + newintrc;
		}
		if (imaplen < 0) {
			printk("oops, error decoding int-map on %s, len=%d\n",
			       p->full_name, imaplen);
			return 0;
		}
		if (!match) {
#ifdef DEBUG_IRQ
			printk("oops, no match in %s int-map for %s\n",
			       p->full_name, np->full_name);
#endif
			return 0;
		}
		/* restart the walk from the matched parent, with the
		 * translated specifier taken out of the map entry */
		p = ipar;
		naddrc = newaddrc;
		nintrc = newintrc;
		ints = imap - nintrc;
		reg = ints - naddrc;
	}
	if (p == NULL) {
#ifdef DEBUG_IRQ
		printk("hmmm, int tree for %s doesn't have ctrler\n",
		       np->full_name);
#endif
		return 0;
	}
	*irq = ints;
	*ictrler = p;
	return nintrc;
}
/* Translate the 2-bit sense/polarity code (0-3) from an ISA/8259-style
 * interrupt specifier into IRQ_SENSE_x | IRQ_POLARITY_x flags. */
static unsigned char map_isa_senses[4] = {
	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
	IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
};

/* Same translation for MPIC (Open PIC) interrupt specifiers. */
static unsigned char map_mpic_senses[4] = {
	IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
	/* 2 seems to be used for the 8259 cascade... */
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
	IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
};
/*
 * Fill in np->intrs / np->n_intrs from the node's device-tree
 * interrupt information.  With @measure_only set, only accounts for
 * the memory needed (prom_alloc just advances *mem_start) without
 * writing anything.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int __devinit finish_node_interrupts(struct device_node *np,
					    unsigned long *mem_start,
					    int measure_only)
{
	unsigned int *ints;
	int intlen, intrcells, intrcount;
	int i, j, n, sense;
	unsigned int *irq, virq;
	struct device_node *ic;

	if (num_interrupt_controllers == 0) {
		/*
		 * Old machines just have a list of interrupt numbers
		 * and no interrupt-controller nodes.
		 */
		ints = (unsigned int *) get_property(np, "AAPL,interrupts",
						     &intlen);
		/* XXX old interpret_pci_props looked in parent too */
		/* XXX old interpret_macio_props looked for interrupts
		   before AAPL,interrupts */
		if (ints == NULL)
			ints = (unsigned int *) get_property(np, "interrupts",
							     &intlen);
		if (ints == NULL)
			return 0;

		np->n_intrs = intlen / sizeof(unsigned int);
		np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
				       mem_start);
		if (!np->intrs)
			return -ENOMEM;
		if (measure_only)
			return 0;

		for (i = 0; i < np->n_intrs; ++i) {
			np->intrs[i].line = *ints++;
			/* no sense info on these old trees: assume
			 * level-low */
			np->intrs[i].sense = IRQ_SENSE_LEVEL
				| IRQ_POLARITY_NEGATIVE;
		}
		return 0;
	}

	ints = (unsigned int *) get_property(np, "interrupts", &intlen);
	if (ints == NULL)
		return 0;
	intrcells = prom_n_intr_cells(np);
	/* intlen becomes the number of specifiers, not bytes */
	intlen /= intrcells * sizeof(unsigned int);

	np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
	if (!np->intrs)
		return -ENOMEM;

	if (measure_only)
		return 0;

	intrcount = 0;
	for (i = 0; i < intlen; ++i, ints += intrcells) {
		n = map_interrupt(&irq, &ic, np, ints, intrcells);
		if (n <= 0)
			continue;

		/* don't map IRQ numbers under a cascaded 8259 controller */
		if (ic && device_is_compatible(ic, "chrp,iic")) {
			np->intrs[intrcount].line = irq[0];
			sense = (n > 1)? (irq[1] & 3): 3;
			np->intrs[intrcount].sense = map_isa_senses[sense];
		} else {
			virq = virt_irq_create_mapping(irq[0]);
#ifdef CONFIG_PPC64
			if (virq == NO_IRQ) {
				printk(KERN_CRIT "Could not allocate interrupt"
				       " number for %s\n", np->full_name);
				continue;
			}
#endif
			np->intrs[intrcount].line = irq_offset_up(virq);
			sense = (n > 1)? (irq[1] & 3): 1;
			np->intrs[intrcount].sense = map_mpic_senses[sense];
		}

#ifdef CONFIG_PPC64
		/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
		if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
			char *name = get_property(ic->parent, "name", NULL);
			if (name && !strcmp(name, "u3"))
				np->intrs[intrcount].line += 128;
			else if (!(name && !strcmp(name, "mac-io")))
				/* ignore other cascaded controllers, such as
				   the k2-sata-root */
				break;
		}
#endif
		if (n > 2) {
			printk("hmmm, got %d intr cells for %s:", n,
			       np->full_name);
			for (j = 0; j < n; ++j)
				printk(" %d", irq[j]);
			printk("\n");
		}
		++intrcount;
	}
	np->n_intrs = intrcount;

	return 0;
}
  360. static int __devinit finish_node(struct device_node *np,
  361. unsigned long *mem_start,
  362. int measure_only)
  363. {
  364. struct device_node *child;
  365. int rc = 0;
  366. rc = finish_node_interrupts(np, mem_start, measure_only);
  367. if (rc)
  368. goto out;
  369. for (child = np->child; child != NULL; child = child->sibling) {
  370. rc = finish_node(child, mem_start, measure_only);
  371. if (rc)
  372. goto out;
  373. }
  374. out:
  375. return rc;
  376. }
  377. static void __init scan_interrupt_controllers(void)
  378. {
  379. struct device_node *np;
  380. int n = 0;
  381. char *name, *ic;
  382. int iclen;
  383. for (np = allnodes; np != NULL; np = np->allnext) {
  384. ic = get_property(np, "interrupt-controller", &iclen);
  385. name = get_property(np, "name", NULL);
  386. /* checking iclen makes sure we don't get a false
  387. match on /chosen.interrupt_controller */
  388. if ((name != NULL
  389. && strcmp(name, "interrupt-controller") == 0)
  390. || (ic != NULL && iclen == 0
  391. && strcmp(name, "AppleKiwi"))) {
  392. if (n == 0)
  393. dflt_interrupt_controller = np;
  394. ++n;
  395. }
  396. }
  397. num_interrupt_controllers = n;
  398. }
  399. /**
  400. * finish_device_tree is called once things are running normally
  401. * (i.e. with text and data mapped to the address they were linked at).
  402. * It traverses the device tree and fills in some of the additional,
  403. * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
  404. * mapping is also initialized at this point.
  405. */
  406. void __init finish_device_tree(void)
  407. {
  408. unsigned long start, end, size = 0;
  409. DBG(" -> finish_device_tree\n");
  410. #ifdef CONFIG_PPC64
  411. /* Initialize virtual IRQ map */
  412. virt_irq_init();
  413. #endif
  414. scan_interrupt_controllers();
  415. /*
  416. * Finish device-tree (pre-parsing some properties etc...)
  417. * We do this in 2 passes. One with "measure_only" set, which
  418. * will only measure the amount of memory needed, then we can
  419. * allocate that memory, and call finish_node again. However,
  420. * we must be careful as most routines will fail nowadays when
  421. * prom_alloc() returns 0, so we must make sure our first pass
  422. * doesn't start at 0. We pre-initialize size to 16 for that
  423. * reason and then remove those additional 16 bytes
  424. */
  425. size = 16;
  426. finish_node(allnodes, &size, 1);
  427. size -= 16;
  428. end = start = (unsigned long) __va(lmb_alloc(size, 128));
  429. finish_node(allnodes, &end, 0);
  430. BUG_ON(end != start + size);
  431. DBG(" <- finish_device_tree\n");
  432. }
  433. static inline char *find_flat_dt_string(u32 offset)
  434. {
  435. return ((char *)initial_boot_params) +
  436. initial_boot_params->off_dt_strings + offset;
  437. }
/**
 * This function is used to scan the flattened device-tree, it is
 * used to extract the memory informations at boot before we can
 * unflatten the tree.
 *
 * @it is invoked for every node (with the node's struct offset, its
 * last path component, and its depth); a non-zero return stops the
 * scan.  Returns the last callback return value, or -EINVAL on a
 * malformed tree.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth --;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			/* not interested in properties here: skip the
			 * size/name-offset words and the padded value */
			u32 sz = *((u32 *)p);
			p += 8;
			/* properties >= 8 bytes were 8-byte aligned
			 * before format version 0x10 */
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
			       " device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		/* hand the callback only the last path component */
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np+1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while(1);

	return rc;
}
/**
 * This function can be used within scan_flattened_dt callback to get
 * access to properties.
 *
 * @node is the offset of a node's first property token (as passed to
 * the of_scan_flat_dt callback); @name is the property to find.  On
 * success, returns a pointer to the value and stores its byte length
 * in *@size (when non-NULL); returns NULL when the property is absent.
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		if (tag != OF_DT_PROP)
			/* ran off the end of this node's property list */
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		/* properties >= 8 bytes were 8-byte aligned before
		 * format version 0x10 */
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}
  532. static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
  533. unsigned long align)
  534. {
  535. void *res;
  536. *mem = _ALIGN(*mem, align);
  537. res = (void *)*mem;
  538. *mem += size;
  539. return res;
  540. }
/*
 * Unflatten (or, on the sizing pass, just measure) one flat-tree node.
 *
 * @mem: allocation cursor in the destination arena (0 on the sizing
 *       pass, where unflatten_dt_alloc only advances it)
 * @p: in/out cursor into the flat blob; advanced past this node
 * @dad: parent device_node, or NULL for the root
 * @allnextpp: tail pointer of the allnodes list; NULL selects the
 *             measure-only pass, where nothing is written
 * @fpsize: accumulated full-path length so far (new-format trees)
 *
 * Recursively converts one OF_DT_BEGIN_NODE..OF_DT_END_NODE span into
 * a struct device_node plus its properties and children, and returns
 * the updated allocation cursor.
 */
static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}

	/* node struct and its full_name share one allocation */
	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	/* walk this node's property list */
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		/* properties >= 8 bytes were 8-byte aligned before
		 * format version 0x10 */
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			/* an "ibm,phandle" always wins over "linux,phandle" */
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			/* value points into the flat blob; no copy made */
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		/* ps: last path component, pa: '@' of the unit address */
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = (unsigned char *)(pp + 1);
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = get_property(np, "name", NULL);
		np->type = get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	/* recurse into any child nodes */
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}
/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree)
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;
	char *p = NULL;
	int l = 0;

	DBG(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	size = (size | 3) + 1;	/* round up to a multiple of 4 */

	DBG("  size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree; the extra 4
	 * bytes hold an end-of-arena marker checked below */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	if (!mem) {
		DBG("Couldn't allocate memory with lmb_alloc()!\n");
		panic("Couldn't allocate memory with lmb_alloc()!\n");
	}
	mem = (unsigned long) __va(mem);

	/* overrun canary, verified after the second pass */
	((u32 *)mem)[size / 4] = 0xdeadbeef;

	DBG("  unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
		       ((u32 *)mem)[size / 4] );
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	/* Retrieve command line */
	if (of_chosen != NULL) {
		p = (char *)get_property(of_chosen, "bootargs", &l);
		if (p != NULL && l > 0)
			strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
	}
#ifdef CONFIG_CMDLINE
	/* fall back to the built-in command line if firmware gave none */
	if (l == 0 || (l == 1 && (*p) == 0))
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	DBG("Command line is: %s\n", cmd_line);

	DBG(" <- unflatten_device_tree()\n");
}
/*
 * Early flat-tree scan callback: for each "cpu" node, record the boot
 * CPU's physical id and update CPU feature bits (Altivec/VMX, SMT)
 * from the node's properties.  Always returns 0 so the scan continues
 * over all nodes.
 */
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long size;
	char *type = of_get_flat_dt_prop(node, "device_type", &size);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	boot_cpuid = 0;
	boot_cpuid_phys = 0;
	if (initial_boot_params && initial_boot_params->version >= 2) {
		/* version 2 of the kexec param format adds the phys cpuid
		 * of booted proc.
		 */
		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
	} else {
		/* Check if it's the boot-cpu, set it's hw index now */
		if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
			prop = of_get_flat_dt_prop(node, "reg", NULL);
			if (prop != NULL)
				boot_cpuid_phys = *prop;
		}
	}
	set_hard_smp_processor_id(0, boot_cpuid_phys);

#ifdef CONFIG_ALTIVEC
	/* Check if we have a VMX and eventually update CPU features */
	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
	if (prop && (*prop) > 0) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}

	/* Same goes for Apple's "altivec" property */
	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
	if (prop) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_PSERIES
	/*
	 * Check for an SMT capable CPU and set the CPU feature. We do
	 * this by looking at the size of the ibm,ppc-interrupt-server#s
	 * property
	 */
	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
					  &size);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	if (prop && ((size / sizeof(u32)) > 1))
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif
	return 0;
}
/*
 * Scan callback for of_scan_flat_dt(): parses the /chosen node of the
 * flat tree and records early boot parameters — platform type, iommu
 * on/off overrides, memory limit, TCE allocation range, RTAS location
 * and the crash-kernel region.
 *
 * Returns 1 once the chosen node is handled so the scan stops,
 * 0 to keep scanning.
 */
static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long *lprop;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	/* /chosen is a depth-1 node named "chosen" or "chosen@0" */
	if (depth != 1 ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	/* get platform type */
	prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
	if (prop == NULL)
		return 0;
#ifdef CONFIG_PPC_MULTIPLATFORM
	_machine = *prop;
#endif

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_PPC_RTAS
	/* To help early debugging via the front panel, we retrieve a minimal
	 * set of RTAS infos now if available
	 */
	{
		u64 *basep, *entryp;

		basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
		entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
		prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
		if (basep && entryp && prop) {
			rtas.base = *basep;
			rtas.entry = *entryp;
			rtas.size = *prop;
		}
	}
#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_KEXEC
	/* NOTE(review): the (u64*) casts are stored through lprop, which is
	 * declared unsigned long *; on a 32-bit build the dereference reads
	 * only part of the 64-bit property — confirm intended. */
	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;
	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}
  883. static int __init early_init_dt_scan_root(unsigned long node,
  884. const char *uname, int depth, void *data)
  885. {
  886. u32 *prop;
  887. if (depth != 0)
  888. return 0;
  889. prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
  890. dt_root_size_cells = (prop == NULL) ? 1 : *prop;
  891. DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
  892. prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
  893. dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
  894. DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
  895. /* break now */
  896. return 1;
  897. }
/*
 * Pull the next address/size value out of a flat device-tree cell list.
 * @s is the number of 32-bit cells making up the value; @*cellp points
 * at the current position and is advanced past the consumed cells.
 *
 * Only the least significant sizeof(unsigned long) bytes are kept:
 * any extra leading cells are skipped.
 */
static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = *p++;
#ifdef CONFIG_PPC64
	/* 64-bit value split across two cells: high word was read first */
	if (s > 1) {
		r <<= 32;
		r |= *(p++);
		s--;
	}
#endif
	*cellp = p;
	return r;
}
/*
 * Scan callback for of_scan_flat_dt(): registers each memory node's
 * (base, size) ranges with the LMB allocator.
 *
 * Always returns 0 so every memory node is visited.
 */
static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	/* Prefer the kernel-specific usable-memory list over raw "reg" */
	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	/* NOTE(review): unconditionally prints reg[0..3]; this may read past
	 * the property when it holds fewer than four cells (debug-only). */
	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		unsigned long base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %lx , %lx\n", base, size);
#ifdef CONFIG_PPC64
		/* With the iommu forced off, only memory below 2GB is usable */
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);
	}
	return 0;
}
  961. static void __init early_reserve_mem(void)
  962. {
  963. unsigned long base, size;
  964. unsigned long *reserve_map;
  965. reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
  966. initial_boot_params->off_mem_rsvmap);
  967. while (1) {
  968. base = *(reserve_map++);
  969. size = *(reserve_map++);
  970. if (size == 0)
  971. break;
  972. DBG("reserving: %lx -> %lx\n", base, size);
  973. lmb_reserve(base, size);
  974. }
  975. #if 0
  976. DBG("memory reserved, lmbs :\n");
  977. lmb_dump_all();
  978. #endif
  979. }
/*
 * early_init_devtree - extract everything the kernel needs from the
 * flattened device-tree before it is unflattened: /chosen parameters,
 * the memory map (LMBs), reserved regions and CPU features.
 *
 * @params: pointer to the flattened device-tree blob
 */
void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
	/* memory_limit may have been set by the chosen-node scan above */
	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
#ifdef CONFIG_CRASH_DUMP
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
#endif
	early_reserve_mem();

	DBG("Scanning CPUs ...\n");

	/* Retrieve CPU related informations from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}
  1010. #undef printk
  1011. int
  1012. prom_n_addr_cells(struct device_node* np)
  1013. {
  1014. int* ip;
  1015. do {
  1016. if (np->parent)
  1017. np = np->parent;
  1018. ip = (int *) get_property(np, "#address-cells", NULL);
  1019. if (ip != NULL)
  1020. return *ip;
  1021. } while (np->parent);
  1022. /* No #address-cells property for the root node, default to 1 */
  1023. return 1;
  1024. }
  1025. EXPORT_SYMBOL(prom_n_addr_cells);
  1026. int
  1027. prom_n_size_cells(struct device_node* np)
  1028. {
  1029. int* ip;
  1030. do {
  1031. if (np->parent)
  1032. np = np->parent;
  1033. ip = (int *) get_property(np, "#size-cells", NULL);
  1034. if (ip != NULL)
  1035. return *ip;
  1036. } while (np->parent);
  1037. /* No #size-cells property for the root node, default to 1 */
  1038. return 1;
  1039. }
  1040. EXPORT_SYMBOL(prom_n_size_cells);
  1041. /**
  1042. * Work out the sense (active-low level / active-high edge)
  1043. * of each interrupt from the device tree.
  1044. */
  1045. void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
  1046. {
  1047. struct device_node *np;
  1048. int i, j;
  1049. /* default to level-triggered */
  1050. memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
  1051. for (np = allnodes; np != 0; np = np->allnext) {
  1052. for (j = 0; j < np->n_intrs; j++) {
  1053. i = np->intrs[j].line;
  1054. if (i >= off && i < max)
  1055. senses[i-off] = np->intrs[j].sense;
  1056. }
  1057. }
  1058. }
  1059. /**
  1060. * Construct and return a list of the device_nodes with a given name.
  1061. */
  1062. struct device_node *find_devices(const char *name)
  1063. {
  1064. struct device_node *head, **prevp, *np;
  1065. prevp = &head;
  1066. for (np = allnodes; np != 0; np = np->allnext) {
  1067. if (np->name != 0 && strcasecmp(np->name, name) == 0) {
  1068. *prevp = np;
  1069. prevp = &np->next;
  1070. }
  1071. }
  1072. *prevp = NULL;
  1073. return head;
  1074. }
  1075. EXPORT_SYMBOL(find_devices);
  1076. /**
  1077. * Construct and return a list of the device_nodes with a given type.
  1078. */
  1079. struct device_node *find_type_devices(const char *type)
  1080. {
  1081. struct device_node *head, **prevp, *np;
  1082. prevp = &head;
  1083. for (np = allnodes; np != 0; np = np->allnext) {
  1084. if (np->type != 0 && strcasecmp(np->type, type) == 0) {
  1085. *prevp = np;
  1086. prevp = &np->next;
  1087. }
  1088. }
  1089. *prevp = NULL;
  1090. return head;
  1091. }
  1092. EXPORT_SYMBOL(find_type_devices);
  1093. /**
  1094. * Returns all nodes linked together
  1095. */
  1096. struct device_node *find_all_nodes(void)
  1097. {
  1098. struct device_node *head, **prevp, *np;
  1099. prevp = &head;
  1100. for (np = allnodes; np != 0; np = np->allnext) {
  1101. *prevp = np;
  1102. prevp = &np->next;
  1103. }
  1104. *prevp = NULL;
  1105. return head;
  1106. }
  1107. EXPORT_SYMBOL(find_all_nodes);
/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property.
 *
 * Returns 1 on match, 0 otherwise (including when the node has no
 * "compatible" property).
 *
 * NOTE(review): strncasecmp() is bounded by strlen(compat), so this
 * is a prefix match — "foo" also matches "foobar".  Callers may rely
 * on that; confirm before tightening to an exact comparison.
 */
int device_is_compatible(struct device_node *device, const char *compat)
{
	const char* cp;
	int cplen, l;

	cp = (char *) get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	/* "compatible" is a list of NUL-terminated strings; walk them all */
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}
	return 0;
}
EXPORT_SYMBOL(device_is_compatible);
  1128. /**
  1129. * Indicates whether the root node has a given value in its
  1130. * compatible property.
  1131. */
  1132. int machine_is_compatible(const char *compat)
  1133. {
  1134. struct device_node *root;
  1135. int rc = 0;
  1136. root = of_find_node_by_path("/");
  1137. if (root) {
  1138. rc = device_is_compatible(root, compat);
  1139. of_node_put(root);
  1140. }
  1141. return rc;
  1142. }
  1143. EXPORT_SYMBOL(machine_is_compatible);
  1144. /**
  1145. * Construct and return a list of the device_nodes with a given type
  1146. * and compatible property.
  1147. */
  1148. struct device_node *find_compatible_devices(const char *type,
  1149. const char *compat)
  1150. {
  1151. struct device_node *head, **prevp, *np;
  1152. prevp = &head;
  1153. for (np = allnodes; np != 0; np = np->allnext) {
  1154. if (type != NULL
  1155. && !(np->type != 0 && strcasecmp(np->type, type) == 0))
  1156. continue;
  1157. if (device_is_compatible(np, compat)) {
  1158. *prevp = np;
  1159. prevp = &np->next;
  1160. }
  1161. }
  1162. *prevp = NULL;
  1163. return head;
  1164. }
  1165. EXPORT_SYMBOL(find_compatible_devices);
  1166. /**
  1167. * Find the device_node with a given full_name.
  1168. */
  1169. struct device_node *find_path_device(const char *path)
  1170. {
  1171. struct device_node *np;
  1172. for (np = allnodes; np != 0; np = np->allnext)
  1173. if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
  1174. return np;
  1175. return NULL;
  1176. }
  1177. EXPORT_SYMBOL(find_path_device);
  1178. /*******
  1179. *
  1180. * New implementation of the OF "find" APIs, return a refcounted
  1181. * object, call of_node_put() when done. The device tree and list
  1182. * are protected by a rw_lock.
  1183. *
  1184. * Note that property management will need some locking as well,
  1185. * this isn't dealt with yet.
  1186. *
  1187. *******/
  1188. /**
  1189. * of_find_node_by_name - Find a node by its "name" property
  1190. * @from: The node to start searching from or NULL, the node
  1191. * you pass will not be searched, only the next one
  1192. * will; typically, you pass what the previous call
  1193. * returned. of_node_put() will be called on it
  1194. * @name: The name string to match against
  1195. *
  1196. * Returns a node pointer with refcount incremented, use
  1197. * of_node_put() on it when done.
  1198. */
  1199. struct device_node *of_find_node_by_name(struct device_node *from,
  1200. const char *name)
  1201. {
  1202. struct device_node *np;
  1203. read_lock(&devtree_lock);
  1204. np = from ? from->allnext : allnodes;
  1205. for (; np != 0; np = np->allnext)
  1206. if (np->name != 0 && strcasecmp(np->name, name) == 0
  1207. && of_node_get(np))
  1208. break;
  1209. if (from)
  1210. of_node_put(from);
  1211. read_unlock(&devtree_lock);
  1212. return np;
  1213. }
  1214. EXPORT_SYMBOL(of_find_node_by_name);
  1215. /**
  1216. * of_find_node_by_type - Find a node by its "device_type" property
  1217. * @from: The node to start searching from or NULL, the node
  1218. * you pass will not be searched, only the next one
  1219. * will; typically, you pass what the previous call
  1220. * returned. of_node_put() will be called on it
  1221. * @name: The type string to match against
  1222. *
  1223. * Returns a node pointer with refcount incremented, use
  1224. * of_node_put() on it when done.
  1225. */
  1226. struct device_node *of_find_node_by_type(struct device_node *from,
  1227. const char *type)
  1228. {
  1229. struct device_node *np;
  1230. read_lock(&devtree_lock);
  1231. np = from ? from->allnext : allnodes;
  1232. for (; np != 0; np = np->allnext)
  1233. if (np->type != 0 && strcasecmp(np->type, type) == 0
  1234. && of_node_get(np))
  1235. break;
  1236. if (from)
  1237. of_node_put(from);
  1238. read_unlock(&devtree_lock);
  1239. return np;
  1240. }
  1241. EXPORT_SYMBOL(of_find_node_by_type);
  1242. /**
  1243. * of_find_compatible_node - Find a node based on type and one of the
  1244. * tokens in its "compatible" property
  1245. * @from: The node to start searching from or NULL, the node
  1246. * you pass will not be searched, only the next one
  1247. * will; typically, you pass what the previous call
  1248. * returned. of_node_put() will be called on it
  1249. * @type: The type string to match "device_type" or NULL to ignore
  1250. * @compatible: The string to match to one of the tokens in the device
  1251. * "compatible" list.
  1252. *
  1253. * Returns a node pointer with refcount incremented, use
  1254. * of_node_put() on it when done.
  1255. */
  1256. struct device_node *of_find_compatible_node(struct device_node *from,
  1257. const char *type, const char *compatible)
  1258. {
  1259. struct device_node *np;
  1260. read_lock(&devtree_lock);
  1261. np = from ? from->allnext : allnodes;
  1262. for (; np != 0; np = np->allnext) {
  1263. if (type != NULL
  1264. && !(np->type != 0 && strcasecmp(np->type, type) == 0))
  1265. continue;
  1266. if (device_is_compatible(np, compatible) && of_node_get(np))
  1267. break;
  1268. }
  1269. if (from)
  1270. of_node_put(from);
  1271. read_unlock(&devtree_lock);
  1272. return np;
  1273. }
  1274. EXPORT_SYMBOL(of_find_compatible_node);
  1275. /**
  1276. * of_find_node_by_path - Find a node matching a full OF path
  1277. * @path: The full path to match
  1278. *
  1279. * Returns a node pointer with refcount incremented, use
  1280. * of_node_put() on it when done.
  1281. */
  1282. struct device_node *of_find_node_by_path(const char *path)
  1283. {
  1284. struct device_node *np = allnodes;
  1285. read_lock(&devtree_lock);
  1286. for (; np != 0; np = np->allnext) {
  1287. if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
  1288. && of_node_get(np))
  1289. break;
  1290. }
  1291. read_unlock(&devtree_lock);
  1292. return np;
  1293. }
  1294. EXPORT_SYMBOL(of_find_node_by_path);
  1295. /**
  1296. * of_find_node_by_phandle - Find a node given a phandle
  1297. * @handle: phandle of the node to find
  1298. *
  1299. * Returns a node pointer with refcount incremented, use
  1300. * of_node_put() on it when done.
  1301. */
  1302. struct device_node *of_find_node_by_phandle(phandle handle)
  1303. {
  1304. struct device_node *np;
  1305. read_lock(&devtree_lock);
  1306. for (np = allnodes; np != 0; np = np->allnext)
  1307. if (np->linux_phandle == handle)
  1308. break;
  1309. if (np)
  1310. of_node_get(np);
  1311. read_unlock(&devtree_lock);
  1312. return np;
  1313. }
  1314. EXPORT_SYMBOL(of_find_node_by_phandle);
  1315. /**
  1316. * of_find_all_nodes - Get next node in global list
  1317. * @prev: Previous node or NULL to start iteration
  1318. * of_node_put() will be called on it
  1319. *
  1320. * Returns a node pointer with refcount incremented, use
  1321. * of_node_put() on it when done.
  1322. */
  1323. struct device_node *of_find_all_nodes(struct device_node *prev)
  1324. {
  1325. struct device_node *np;
  1326. read_lock(&devtree_lock);
  1327. np = prev ? prev->allnext : allnodes;
  1328. for (; np != 0; np = np->allnext)
  1329. if (of_node_get(np))
  1330. break;
  1331. if (prev)
  1332. of_node_put(prev);
  1333. read_unlock(&devtree_lock);
  1334. return np;
  1335. }
  1336. EXPORT_SYMBOL(of_find_all_nodes);
  1337. /**
  1338. * of_get_parent - Get a node's parent if any
  1339. * @node: Node to get parent
  1340. *
  1341. * Returns a node pointer with refcount incremented, use
  1342. * of_node_put() on it when done.
  1343. */
  1344. struct device_node *of_get_parent(const struct device_node *node)
  1345. {
  1346. struct device_node *np;
  1347. if (!node)
  1348. return NULL;
  1349. read_lock(&devtree_lock);
  1350. np = of_node_get(node->parent);
  1351. read_unlock(&devtree_lock);
  1352. return np;
  1353. }
  1354. EXPORT_SYMBOL(of_get_parent);
  1355. /**
  1356. * of_get_next_child - Iterate a node childs
  1357. * @node: parent node
  1358. * @prev: previous child of the parent node, or NULL to get first
  1359. *
  1360. * Returns a node pointer with refcount incremented, use
  1361. * of_node_put() on it when done.
  1362. */
  1363. struct device_node *of_get_next_child(const struct device_node *node,
  1364. struct device_node *prev)
  1365. {
  1366. struct device_node *next;
  1367. read_lock(&devtree_lock);
  1368. next = prev ? prev->sibling : node->child;
  1369. for (; next != 0; next = next->sibling)
  1370. if (of_node_get(next))
  1371. break;
  1372. if (prev)
  1373. of_node_put(prev);
  1374. read_unlock(&devtree_lock);
  1375. return next;
  1376. }
  1377. EXPORT_SYMBOL(of_get_next_child);
/**
 * of_node_get - Increment refcount of a node
 * @node: Node to inc refcount, NULL is supported to
 *	simplify writing of callers
 *
 * Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);
/* Map a kref embedded in a device_node back to the enclosing node */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}
/**
 * of_node_release - release a dynamically allocated node
 * @kref: kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.  Statically allocated (boot-time) nodes are
 * never freed: anything not marked OF_IS_DYNAMIC returns early.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* Boot-time nodes live forever; only dynamic nodes are freed */
	if (!OF_IS_DYNAMIC(node))
		return;
	/* Free the property list (name, value, then the property itself)
	 * before releasing the node's own allocations. */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;
	}
	kfree(node->intrs);
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
/**
 * of_node_put - Decrement refcount of a node
 * @node: Node to dec refcount, NULL is supported to
 *	simplify writing of callers
 *
 * When the refcount reaches zero, of_node_release() is invoked
 * (which frees dynamic nodes only).
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);
/*
 * Plug a device node into the tree and global list.
 *
 * The node becomes the first child of its parent and the head of the
 * allnodes list, under the writer side of devtree_lock.  The caller
 * must have set np->parent beforehand.
 */
void of_attach_node(struct device_node *np)
{
	write_lock(&devtree_lock);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock(&devtree_lock);
}
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 *
 * NOTE(review): assumes np is currently linked on both the allnodes
 * list and its parent's child list; the unlink walks below do not
 * terminate otherwise.
 */
void of_detach_node(const struct device_node *np)
{
	struct device_node *parent;

	write_lock(&devtree_lock);

	parent = np->parent;

	/* Unlink from the global allnodes list */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* Unlink from the parent's child list */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	write_unlock(&devtree_lock);
}
  1477. #ifdef CONFIG_PPC_PSERIES
  1478. /*
  1479. * Fix up the uninitialized fields in a new device node:
  1480. * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
  1481. *
  1482. * A lot of boot-time code is duplicated here, because functions such
  1483. * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
  1484. * slab allocator.
  1485. *
  1486. * This should probably be split up into smaller chunks.
  1487. */
  1488. static int of_finish_dynamic_node(struct device_node *node)
  1489. {
  1490. struct device_node *parent = of_get_parent(node);
  1491. int err = 0;
  1492. phandle *ibm_phandle;
  1493. node->name = get_property(node, "name", NULL);
  1494. node->type = get_property(node, "device_type", NULL);
  1495. if (!parent) {
  1496. err = -ENODEV;
  1497. goto out;
  1498. }
  1499. /* We don't support that function on PowerMac, at least
  1500. * not yet
  1501. */
  1502. if (_machine == PLATFORM_POWERMAC)
  1503. return -ENODEV;
  1504. /* fix up new node's linux_phandle field */
  1505. if ((ibm_phandle = (unsigned int *)get_property(node,
  1506. "ibm,phandle", NULL)))
  1507. node->linux_phandle = *ibm_phandle;
  1508. out:
  1509. of_node_put(parent);
  1510. return err;
  1511. }
/*
 * pSeries dynamic-reconfiguration notifier: finish initializing device
 * nodes that are added at runtime.  Unhandled actions return
 * NOTIFY_DONE; a failed node finish returns NOTIFY_BAD.
 */
static int prom_reconfig_notifier(struct notifier_block *nb,
				  unsigned long action, void *node)
{
	int err;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		err = of_finish_dynamic_node(node);
		/* finish_node() fills in addresses/interrupts; only run it
		 * when the dynamic fixups succeeded */
		if (!err)
			finish_node(node, NULL, 0);
		if (err < 0) {
			printk(KERN_ERR "finish_node returned %d\n", err);
			err = NOTIFY_BAD;
		}
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}
/* High priority so node fixups run before other reconfig listeners */
static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};
/* Register the reconfiguration notifier during boot (initcall) */
static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
  1541. #endif
  1542. /*
  1543. * Find a property with a given name for a given node
  1544. * and return the value.
  1545. */
  1546. unsigned char *get_property(struct device_node *np, const char *name,
  1547. int *lenp)
  1548. {
  1549. struct property *pp;
  1550. for (pp = np->properties; pp != 0; pp = pp->next)
  1551. if (strcmp(pp->name, name) == 0) {
  1552. if (lenp != 0)
  1553. *lenp = pp->length;
  1554. return pp->value;
  1555. }
  1556. return NULL;
  1557. }
  1558. EXPORT_SYMBOL(get_property);
  1559. /*
  1560. * Add a property to a node
  1561. */
  1562. int prom_add_property(struct device_node* np, struct property* prop)
  1563. {
  1564. struct property **next;
  1565. prop->next = NULL;
  1566. write_lock(&devtree_lock);
  1567. next = &np->properties;
  1568. while (*next) {
  1569. if (strcmp(prop->name, (*next)->name) == 0) {
  1570. /* duplicate ! don't insert it */
  1571. write_unlock(&devtree_lock);
  1572. return -1;
  1573. }
  1574. next = &(*next)->next;
  1575. }
  1576. *next = prop;
  1577. write_unlock(&devtree_lock);
  1578. #ifdef CONFIG_PROC_DEVICETREE
  1579. /* try to add to proc as well if it was initialized */
  1580. if (np->pde)
  1581. proc_device_tree_add_prop(np->pde, prop);
  1582. #endif /* CONFIG_PROC_DEVICETREE */
  1583. return 0;
  1584. }