/* arch/ppc64/kernel/setup_64.c */
  1. /*
  2. *
  3. * Common boot and setup code.
  4. *
  5. * Copyright (C) 2001 PPC64 Team, IBM Corp
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the License, or (at your option) any later version.
  11. */
  12. #undef DEBUG
  13. #include <linux/config.h>
  14. #include <linux/module.h>
  15. #include <linux/string.h>
  16. #include <linux/sched.h>
  17. #include <linux/init.h>
  18. #include <linux/kernel.h>
  19. #include <linux/reboot.h>
  20. #include <linux/delay.h>
  21. #include <linux/initrd.h>
  22. #include <linux/ide.h>
  23. #include <linux/seq_file.h>
  24. #include <linux/ioport.h>
  25. #include <linux/console.h>
  26. #include <linux/utsname.h>
  27. #include <linux/tty.h>
  28. #include <linux/root_dev.h>
  29. #include <linux/notifier.h>
  30. #include <linux/cpu.h>
  31. #include <linux/unistd.h>
  32. #include <linux/serial.h>
  33. #include <linux/serial_8250.h>
  34. #include <asm/io.h>
  35. #include <asm/prom.h>
  36. #include <asm/processor.h>
  37. #include <asm/pgtable.h>
  38. #include <asm/smp.h>
  39. #include <asm/elf.h>
  40. #include <asm/machdep.h>
  41. #include <asm/paca.h>
  42. #include <asm/ppcdebug.h>
  43. #include <asm/time.h>
  44. #include <asm/cputable.h>
  45. #include <asm/sections.h>
  46. #include <asm/btext.h>
  47. #include <asm/nvram.h>
  48. #include <asm/setup.h>
  49. #include <asm/system.h>
  50. #include <asm/rtas.h>
  51. #include <asm/iommu.h>
  52. #include <asm/serial.h>
  53. #include <asm/cache.h>
  54. #include <asm/page.h>
  55. #include <asm/mmu.h>
  56. #include <asm/lmb.h>
  57. #include <asm/iSeries/ItLpNaca.h>
  58. #include <asm/firmware.h>
  59. #include <asm/systemcfg.h>
  60. #include <asm/xmon.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * Here are some early debugging facilities. You can enable one
 * but your kernel will not boot on anything else if you do so
 */

/* This one is for use on LPAR machines that support an HVC console
 * on vterm 0
 */
extern void udbg_init_debug_lpar(void);
/* This one is for use on Apple G5 machines
 */
extern void udbg_init_pmac_realmode(void);
/* That's RTAS panel debug */
extern void call_rtas_display_status_delay(unsigned char c);
/* Here's maple real mode debug */
extern void udbg_init_maple_realmode(void);

/* Default: no early debug output.  To get early output on a specific
 * machine type, move exactly one of the alternative definitions out of
 * the #if 0 block below (they would otherwise redefine each other).
 */
#define EARLY_DEBUG_INIT() do {} while(0)

#if 0
#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
#define EARLY_DEBUG_INIT() \
	do { udbg_putc = call_rtas_display_status_delay; } while(0)
#endif
/* extern void *stab; */
extern unsigned long klimit;

extern void mm_init_ppc64(void);
extern void stab_initialize(unsigned long stab);
extern void htab_initialize(void);
extern void early_init_devtree(void *flat_dt);
extern void unflatten_device_tree(void);

extern void smp_release_cpus(void);

int have_of = 1;
int boot_cpuid = 0;
/* Hardware id of the cpu we booted on; used by setup_cpu_maps() to make
 * the boot cpu logical cpu 0. */
int boot_cpuid_phys = 0;
dev_t boot_dev;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches;
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);

#ifdef CONFIG_MAGIC_SYSRQ
unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */

static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);

/* Registered on the panic notifier chain in setup_arch() when the
 * platform supplies a panic hook. */
static struct notifier_block ppc64_panic_block = {
	.notifier_call = ppc64_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};
  123. #ifdef CONFIG_SMP
  124. static int smt_enabled_cmdline;
  125. /* Look for ibm,smt-enabled OF option */
  126. static void check_smt_enabled(void)
  127. {
  128. struct device_node *dn;
  129. char *smt_option;
  130. /* Allow the command line to overrule the OF option */
  131. if (smt_enabled_cmdline)
  132. return;
  133. dn = of_find_node_by_path("/options");
  134. if (dn) {
  135. smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
  136. if (smt_option) {
  137. if (!strcmp(smt_option, "on"))
  138. smt_enabled_at_boot = 1;
  139. else if (!strcmp(smt_option, "off"))
  140. smt_enabled_at_boot = 0;
  141. }
  142. }
  143. }
  144. /* Look for smt-enabled= cmdline option */
  145. static int __init early_smt_enabled(char *p)
  146. {
  147. smt_enabled_cmdline = 1;
  148. if (!p)
  149. return 0;
  150. if (!strcmp(p, "on") || !strcmp(p, "1"))
  151. smt_enabled_at_boot = 1;
  152. else if (!strcmp(p, "off") || !strcmp(p, "0"))
  153. smt_enabled_at_boot = 0;
  154. return 0;
  155. }
  156. early_param("smt-enabled", early_smt_enabled);
/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 */
static void __init setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;
	int swap_cpuid = 0;

	check_smt_enabled();

	/* Walk every "cpu" node; each hardware thread listed in
	 * ibm,ppc-interrupt-server#s becomes one logical cpu. */
	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		u32 *intserv;
		int j, len = sizeof(u32), nthreads;

		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
					      &len);
		if (!intserv)
			/* Fall back to "reg" (single entry, len stays
			 * sizeof(u32) from the initializer above). */
			intserv = (u32 *)get_property(dn, "reg", NULL);

		nthreads = len / sizeof(u32);

		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			cpu_set(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, intserv[j]);

			/* remember which logical cpu got the boot cpu's
			 * physical id, for the swap below */
			if (intserv[j] == boot_cpuid_phys)
				swap_cpuid = cpu;
			cpu_set(cpu, cpu_possible_map);
			cpu++;
		}
	}

	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
	 * boot cpu is logical 0.
	 */
	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
		u32 tmp;
		tmp = get_hard_smp_processor_id(0);
		set_hard_smp_processor_id(0, boot_cpuid_phys);
		set_hard_smp_processor_id(swap_cpuid, tmp);
	}

	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		unsigned int *ireg;

		num_addr_cell = prom_n_addr_cells(dn);
		num_size_cell = prom_n_size_cells(dn);

		ireg = (unsigned int *)
			get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		/* max cpus follows the address and size cells in the
		 * ibm,lrdr-capacity property */
		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= 2;

		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			cpu_set(cpu, cpu_possible_map);
	out:
		of_node_put(dn);
	}

	/*
	 * Do the sibling map; assume only two threads per processor.
	 */
	for_each_cpu(cpu) {
		cpu_set(cpu, cpu_sibling_map[cpu]);
		if (cpu_has_feature(CPU_FTR_SMT))
			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
	}

	systemcfg->processorCount = num_present_cpus();
}
#endif /* CONFIG_SMP */
extern struct machdep_calls pSeries_md;
extern struct machdep_calls pmac_md;
extern struct machdep_calls maple_md;
extern struct machdep_calls bpa_md;
extern struct machdep_calls iseries_md;

/* Ultimately, stuff them in an elf section like initcalls... */
/* NULL-terminated table of candidate platforms; early_setup() walks it
 * and copies the first entry whose probe() accepts the current platform
 * into the global ppc_md. */
static struct machdep_calls __initdata *machines[] = {
#ifdef CONFIG_PPC_PSERIES
	&pSeries_md,
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_PMAC
	&pmac_md,
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC_MAPLE
	&maple_md,
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_BPA
	&bpa_md,
#endif
#ifdef CONFIG_PPC_ISERIES
	&iseries_md,
#endif
	NULL
};
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to setup out LMB
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	struct paca_struct *lpaca = get_paca();
	/* static: we are on a tiny early stack and in real mode */
	static struct machdep_calls **mach;

	/*
	 * Enable early debugging if any specified (see top of
	 * this file)
	 */
	EARLY_DEBUG_INIT();

	DBG(" -> early_setup()\n");

	/*
	 * Fill the default DBG level (do we want to keep
	 * that old mecanism around forever ?)
	 */
	ppcdbg_initialize();

	/*
	 * Do early initializations using the flattened device
	 * tree, like retreiving the physical memory map or
	 * calculating/retreiving the hash table size
	 */
	early_init_devtree(__va(dt_ptr));

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type for platform %x...\n",
	    systemcfg->platform);

	for (mach = machines; *mach; mach++) {
		if ((*mach)->probe(systemcfg->platform))
			break;
	}
	/* What can we do if we didn't find ? */
	if (*mach == NULL) {
		DBG("No suitable machine found !\n");
		/* No platform matched: nothing sane left to do, spin. */
		for (;;);
	}
	/* Structure copy of the matching platform's callbacks */
	ppc_md = **mach;

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize stab / SLB management
	 */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(lpaca->stab_real);

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory
	 */
	htab_initialize();

	DBG(" <- early_setup()\n");
}
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg structures
 * (at least until we get rid of them completely). This is mostly some
 * cache informations about the CPU that will be used by cache flush
 * routines and/or provided to userland
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		/* only the first cpu node is actually parsed */
		if ( num_cpus == 1 ) {
			u32 *sizep, *lsizep;
			u32 size, lsize;
			const char *dc, *ic;

			/* Then read cache informations */
			/* PowerMac device-trees use the -block-size names */
			if (systemcfg->platform == PLATFORM_POWERMAC) {
				dc = "d-cache-block-size";
				ic = "i-cache-block-size";
			} else {
				dc = "d-cache-line-size";
				ic = "i-cache-line-size";
			}

			/* line size falls back to the cpu-table default
			 * when the property is missing */
			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = (u32 *)get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *) get_property(np, dc, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->dcache_size = ppc64_caches.dsize = size;
			systemcfg->dcache_line_size =
				ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			/* Same dance for the i-cache */
			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = (u32 *)get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *)get_property(np, ic, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->icache_size = ppc64_caches.isize = size;
			systemcfg->icache_line_size =
				ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;
	systemcfg->processor = mfspr(SPRN_PVR);

	DBG(" <- initialize_cache_info()\n");
}
/* Locate an initrd passed via the linux,initrd-start/-end properties of
 * /chosen; set initrd_start/initrd_end and ROOT_DEV when the range looks
 * sane, clear them otherwise.  No-op without CONFIG_BLK_DEV_INITRD. */
static void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	u64 *prop;

	DBG(" -> check_for_initrd()\n");

	if (of_chosen) {
		prop = (u64 *)get_property(of_chosen,
				"linux,initrd-start", NULL);
		if (prop != NULL) {
			initrd_start = (unsigned long)__va(*prop);
			prop = (u64 *)get_property(of_chosen,
					"linux,initrd-end", NULL);
			if (prop != NULL) {
				initrd_end = (unsigned long)__va(*prop);
				initrd_below_start_ok = 1;
			} else
				/* start without end is useless: discard */
				initrd_start = 0;
		}
	}

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}
/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with informations
	 * retreived from the device-tree. Need to be called before
	 * finish_device_tree() since the later requires some of the
	 * informations filled up here to properly parse the interrupt
	 * tree.
	 * It also sets up the cache line sizes which allows to call
	 * routines like flush_icache_range (used by the hash init
	 * later on).
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform specific early initializations, that includes
	 * setting up the hash table pointers. It also sets up some interrupt-mapping
	 * related options that will be used by finish_device_tree()
	 */
	ppc_md.init_early();

	/*
	 * "Finish" the device-tree, that is do the actual parsing of
	 * some of the properties like the interrupt map
	 */
	finish_device_tree();

#ifdef CONFIG_BOOTX_TEXT
	init_boot_display();
#endif

	/*
	 * Initialize xmon
	 */
#ifdef CONFIG_XMON_DEFAULT
	xmon_init(1);
#endif

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	parse_early_param();

#ifdef CONFIG_SMP
	/*
	 * iSeries has already initialized the cpu maps at this point.
	 */
	setup_cpu_maps();

	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	/* Boot-time banner of the key MMU/platform parameters */
	printk("Starting Linux PPC64 %s\n", system_utsname.version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
	printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
	/* NOTE(review): "0x%ld" prints the value in decimal after a "0x"
	 * prefix -- looks like it was meant to be "0x%lx"; confirm before
	 * changing the log format. */
	printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
	printk("systemcfg = 0x%p\n", systemcfg);
	printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
	printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
	printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
	printk("ppc64_caches.dcache_line_size = 0x%x\n",
	       ppc64_caches.dline_size);
	printk("ppc64_caches.icache_line_size = 0x%x\n",
	       ppc64_caches.iline_size);
	printk("htab_address = 0x%p\n", htab_address);
	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
	printk("-----------------------------------------------------\n");

	mm_init_ppc64();

	DBG(" <- setup_system()\n");
}
  530. static int ppc64_panic_event(struct notifier_block *this,
  531. unsigned long event, void *ptr)
  532. {
  533. ppc_md.panic((char *)ptr); /* May not return */
  534. return NOTIFY_DONE;
  535. }
/*
 * These three variables are used to save values passed to us by prom_init()
 * via the device tree. The TCE variables are needed because with a memory_limit
 * in force we may need to explicitly map the TCE are at the top of RAM.
 */
unsigned long memory_limit;	/* set by the mem= option (see early_parsemem) */
unsigned long tce_alloc_start;
unsigned long tce_alloc_end;
  544. #ifdef CONFIG_PPC_ISERIES
  545. /*
  546. * On iSeries we just parse the mem=X option from the command line.
  547. * On pSeries it's a bit more complicated, see prom_init_mem()
  548. */
  549. static int __init early_parsemem(char *p)
  550. {
  551. if (!p)
  552. return 0;
  553. memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
  554. return 0;
  555. }
  556. early_param("mem", early_parsemem);
  557. #endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_IRQSTACKS
/* Allocate the per-cpu soft/hard irq stacks from the LMB allocator. */
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/*
	 * interrupt stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them.
	 */
	for_each_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
		hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
	}
}
#else
#define irqstack_early_init()
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled.
 */
static void __init emergency_stack_init(void)
{
	unsigned long limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(0x10000000UL, lmb.rmo_size);

	/* +PAGE_SIZE: emergency_sp points at the top of the page, since
	 * stacks grow downwards */
	for_each_cpu(i)
		paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
						limit)) + PAGE_SIZE;
}
  598. /*
  599. * Called from setup_arch to initialize the bitmap of available
  600. * syscalls in the systemcfg page
  601. */
  602. void __init setup_syscall_map(void)
  603. {
  604. unsigned int i, count64 = 0, count32 = 0;
  605. extern unsigned long *sys_call_table;
  606. extern unsigned long sys_ni_syscall;
  607. for (i = 0; i < __NR_syscalls; i++) {
  608. if (sys_call_table[i*2] != sys_ni_syscall) {
  609. count64++;
  610. systemcfg->syscall_map_64[i >> 5] |=
  611. 0x80000000UL >> (i & 0x1f);
  612. }
  613. if (sys_call_table[i*2+1] != sys_ni_syscall) {
  614. count32++;
  615. systemcfg->syscall_map_32[i >> 5] |=
  616. 0x80000000UL >> (i & 0x1f);
  617. }
  618. }
  619. printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
  620. count32, count64);
  621. }
/*
 * Called into from start_kernel, after lock_kernel has been called.
 * Initializes bootmem, which is unsed to manage page allocation until
 * mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	extern void do_init_bootmem(void);

	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* reboot on panic */
	panic_timeout = 180;

	/* only hook the panic notifier when the platform can handle it */
	if (ppc_md.panic)
		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	irqstack_early_init();
	emergency_stack_init();

	stabs_alloc();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

	/* initialize the syscall map in systemcfg */
	setup_syscall_map();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	ppc_md.setup_arch();

	/* Use the default idle loop if the platform hasn't provided one. */
	if (NULL == ppc_md.idle_loop) {
		ppc_md.idle_loop = default_idle;
		printk(KERN_INFO "Using default idle loop\n");
	}

	paging_init();
	ppc64_boot_msg(0x15, "Setup Done");
}
  667. /* ToDo: do something useful if ppc_md is not yet setup. */
  668. #define PPC64_LINUX_FUNCTION 0x0f000000
  669. #define PPC64_IPL_MESSAGE 0xc0000000
  670. #define PPC64_TERM_MESSAGE 0xb0000000
  671. static void ppc64_do_msg(unsigned int src, const char *msg)
  672. {
  673. if (ppc_md.progress) {
  674. char buf[128];
  675. sprintf(buf, "%08X\n", src);
  676. ppc_md.progress(buf, 0);
  677. snprintf(buf, 128, "%s", msg);
  678. ppc_md.progress(buf, 0);
  679. }
  680. }
  681. /* Print a boot progress message. */
  682. void ppc64_boot_msg(unsigned int src, const char *msg)
  683. {
  684. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
  685. printk("[boot]%04x %s\n", src, msg);
  686. }
  687. /* Print a termination message (print only -- does not stop the kernel) */
  688. void ppc64_terminate_msg(unsigned int src, const char *msg)
  689. {
  690. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
  691. printk("[terminate]%04x %s\n", src, msg);
  692. }
#ifndef CONFIG_PPC_ISERIES
/*
 * This function can be used by platforms to "find" legacy serial ports.
 * It works for "serial" nodes under an "isa" node, and will try to
 * respect the "ibm,aix-loc" property if any. It works with up to 8
 * ports.
 */

#define MAX_LEGACY_SERIAL_PORTS 8
/* +1: keep a NULL-terminating sentinel entry for the 8250 driver */
static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
static unsigned int old_serial_count;	/* entries used in serial_ports[] */
  703. void __init generic_find_legacy_serial_ports(u64 *physport,
  704. unsigned int *default_speed)
  705. {
  706. struct device_node *np;
  707. u32 *sizeprop;
  708. struct isa_reg_property {
  709. u32 space;
  710. u32 address;
  711. u32 size;
  712. };
  713. struct pci_reg_property {
  714. struct pci_address addr;
  715. u32 size_hi;
  716. u32 size_lo;
  717. };
  718. DBG(" -> generic_find_legacy_serial_port()\n");
  719. *physport = 0;
  720. if (default_speed)
  721. *default_speed = 0;
  722. np = of_find_node_by_path("/");
  723. if (!np)
  724. return;
  725. /* First fill our array */
  726. for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
  727. struct device_node *isa, *pci;
  728. struct isa_reg_property *reg;
  729. unsigned long phys_size, addr_size, io_base;
  730. u32 *rangesp;
  731. u32 *interrupts, *clk, *spd;
  732. char *typep;
  733. int index, rlen, rentsize;
  734. /* Ok, first check if it's under an "isa" parent */
  735. isa = of_get_parent(np);
  736. if (!isa || strcmp(isa->name, "isa")) {
  737. DBG("%s: no isa parent found\n", np->full_name);
  738. continue;
  739. }
  740. /* Now look for an "ibm,aix-loc" property that gives us ordering
  741. * if any...
  742. */
  743. typep = (char *)get_property(np, "ibm,aix-loc", NULL);
  744. /* Get the ISA port number */
  745. reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
  746. if (reg == NULL)
  747. goto next_port;
  748. /* We assume the interrupt number isn't translated ... */
  749. interrupts = (u32 *)get_property(np, "interrupts", NULL);
  750. /* get clock freq. if present */
  751. clk = (u32 *)get_property(np, "clock-frequency", NULL);
  752. /* get default speed if present */
  753. spd = (u32 *)get_property(np, "current-speed", NULL);
  754. /* Default to locate at end of array */
  755. index = old_serial_count; /* end of the array by default */
  756. /* If we have a location index, then use it */
  757. if (typep && *typep == 'S') {
  758. index = simple_strtol(typep+1, NULL, 0) - 1;
  759. /* if index is out of range, use end of array instead */
  760. if (index >= MAX_LEGACY_SERIAL_PORTS)
  761. index = old_serial_count;
  762. /* if our index is still out of range, that mean that
  763. * array is full, we could scan for a free slot but that
  764. * make little sense to bother, just skip the port
  765. */
  766. if (index >= MAX_LEGACY_SERIAL_PORTS)
  767. goto next_port;
  768. if (index >= old_serial_count)
  769. old_serial_count = index + 1;
  770. /* Check if there is a port who already claimed our slot */
  771. if (serial_ports[index].iobase != 0) {
  772. /* if we still have some room, move it, else override */
  773. if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
  774. DBG("Moved legacy port %d -> %d\n", index,
  775. old_serial_count);
  776. serial_ports[old_serial_count++] =
  777. serial_ports[index];
  778. } else {
  779. DBG("Replacing legacy port %d\n", index);
  780. }
  781. }
  782. }
  783. if (index >= MAX_LEGACY_SERIAL_PORTS)
  784. goto next_port;
  785. if (index >= old_serial_count)
  786. old_serial_count = index + 1;
  787. /* Now fill the entry */
  788. memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
  789. serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
  790. serial_ports[index].iobase = reg->address;
  791. serial_ports[index].irq = interrupts ? interrupts[0] : 0;
  792. serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
  793. DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
  794. index,
  795. serial_ports[index].iobase,
  796. serial_ports[index].irq,
  797. serial_ports[index].uartclk);
  798. /* Get phys address of IO reg for port 1 */
  799. if (index != 0)
  800. goto next_port;
  801. pci = of_get_parent(isa);
  802. if (!pci) {
  803. DBG("%s: no pci parent found\n", np->full_name);
  804. goto next_port;
  805. }
  806. rangesp = (u32 *)get_property(pci, "ranges", &rlen);
  807. if (rangesp == NULL) {
  808. of_node_put(pci);
  809. goto next_port;
  810. }
  811. rlen /= 4;
  812. /* we need the #size-cells of the PCI bridge node itself */
  813. phys_size = 1;
  814. sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
  815. if (sizeprop != NULL)
  816. phys_size = *sizeprop;
  817. /* we need the parent #addr-cells */
  818. addr_size = prom_n_addr_cells(pci);
  819. rentsize = 3 + addr_size + phys_size;
  820. io_base = 0;
  821. for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
  822. if (((rangesp[0] >> 24) & 0x3) != 1)
  823. continue; /* not IO space */
  824. io_base = rangesp[3];
  825. if (addr_size == 2)
  826. io_base = (io_base << 32) | rangesp[4];
  827. }
  828. if (io_base != 0) {
  829. *physport = io_base + reg->address;
  830. if (default_speed && spd)
  831. *default_speed = *spd;
  832. }
  833. of_node_put(pci);
  834. next_port:
  835. of_node_put(isa);
  836. }
  837. DBG(" <- generic_find_legacy_serial_port()\n");
  838. }
/* Platform device handing serial_ports[] (filled by
 * generic_find_legacy_serial_ports()) to the 8250 driver. */
static struct platform_device serial_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = serial_ports,
	},
};
  846. static int __init serial_dev_init(void)
  847. {
  848. return platform_device_register(&serial_device);
  849. }
  850. arch_initcall(serial_dev_init);
  851. #endif /* CONFIG_PPC_ISERIES */
  852. int check_legacy_ioport(unsigned long base_port)
  853. {
  854. if (ppc_md.check_legacy_ioport == NULL)
  855. return 0;
  856. return ppc_md.check_legacy_ioport(base_port);
  857. }
  858. EXPORT_SYMBOL(check_legacy_ioport);
  859. #ifdef CONFIG_XMON
  860. static int __init early_xmon(char *p)
  861. {
  862. /* ensure xmon is enabled */
  863. if (p) {
  864. if (strncmp(p, "on", 2) == 0)
  865. xmon_init(1);
  866. if (strncmp(p, "off", 3) == 0)
  867. xmon_init(0);
  868. if (strncmp(p, "early", 5) != 0)
  869. return 0;
  870. }
  871. xmon_init(1);
  872. debugger(NULL);
  873. return 0;
  874. }
  875. early_param("xmon", early_xmon);
  876. #endif
  877. void cpu_die(void)
  878. {
  879. if (ppc_md.cpu_die)
  880. ppc_md.cpu_die();
  881. }