setup_64.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011
  1. /*
  2. *
  3. * Common boot and setup code.
  4. *
  5. * Copyright (C) 2001 PPC64 Team, IBM Corp
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the License, or (at your option) any later version.
  11. */
  12. #undef DEBUG
  13. #include <linux/config.h>
  14. #include <linux/module.h>
  15. #include <linux/string.h>
  16. #include <linux/sched.h>
  17. #include <linux/init.h>
  18. #include <linux/kernel.h>
  19. #include <linux/reboot.h>
  20. #include <linux/delay.h>
  21. #include <linux/initrd.h>
  22. #include <linux/ide.h>
  23. #include <linux/seq_file.h>
  24. #include <linux/ioport.h>
  25. #include <linux/console.h>
  26. #include <linux/utsname.h>
  27. #include <linux/tty.h>
  28. #include <linux/root_dev.h>
  29. #include <linux/notifier.h>
  30. #include <linux/cpu.h>
  31. #include <linux/unistd.h>
  32. #include <linux/serial.h>
  33. #include <linux/serial_8250.h>
  34. #include <asm/io.h>
  35. #include <asm/prom.h>
  36. #include <asm/processor.h>
  37. #include <asm/pgtable.h>
  38. #include <asm/smp.h>
  39. #include <asm/elf.h>
  40. #include <asm/machdep.h>
  41. #include <asm/paca.h>
  42. #include <asm/ppcdebug.h>
  43. #include <asm/time.h>
  44. #include <asm/cputable.h>
  45. #include <asm/sections.h>
  46. #include <asm/btext.h>
  47. #include <asm/nvram.h>
  48. #include <asm/setup.h>
  49. #include <asm/system.h>
  50. #include <asm/rtas.h>
  51. #include <asm/iommu.h>
  52. #include <asm/serial.h>
  53. #include <asm/cache.h>
  54. #include <asm/page.h>
  55. #include <asm/mmu.h>
  56. #include <asm/lmb.h>
  57. #include <asm/iseries/it_lp_naca.h>
  58. #include <asm/firmware.h>
  59. #include <asm/systemcfg.h>
  60. #include <asm/xmon.h>
  61. #ifdef DEBUG
  62. #define DBG(fmt...) udbg_printf(fmt)
  63. #else
  64. #define DBG(fmt...)
  65. #endif
  66. /*
  67. * Here are some early debugging facilities. You can enable one
  68. * but your kernel will not boot on anything else if you do so
  69. */
  70. /* This one is for use on LPAR machines that support an HVC console
  71. * on vterm 0
  72. */
  73. extern void udbg_init_debug_lpar(void);
  74. /* This one is for use on Apple G5 machines
  75. */
  76. extern void udbg_init_pmac_realmode(void);
  77. /* That's RTAS panel debug */
  78. extern void call_rtas_display_status_delay(unsigned char c);
  79. /* Here's maple real mode debug */
  80. extern void udbg_init_maple_realmode(void);
  81. #define EARLY_DEBUG_INIT() do {} while(0)
  82. #if 0
  83. #define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
  84. #define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
  85. #define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
  86. #define EARLY_DEBUG_INIT() \
  87. do { udbg_putc = call_rtas_display_status_delay; } while(0)
  88. #endif
/* extern void *stab; */
extern unsigned long klimit;

/* Early-init entry points implemented in other files of this arch
 * (mm bring-up, segment/hash MMU init, flattened device-tree parsing,
 * secondary-cpu release). */
extern void mm_init_ppc64(void);
extern void stab_initialize(unsigned long stab);
extern void htab_initialize(void);
extern void early_init_devtree(void *flat_dt);
extern void unflatten_device_tree(void);
extern void smp_release_cpus(void);

int have_of = 1;		/* always booted with Open Firmware / a device-tree */
int boot_cpuid = 0;		/* logical id of the cpu we booted on */
int boot_cpuid_phys = 0;	/* hardware (physical) id of the boot cpu */
dev_t boot_dev;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches;
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/* The main machine-dep calls structure, filled in by early_setup()
 * from the matching entry of machines[] below.
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);

#ifdef CONFIG_MAGIC_SYSRQ
unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */

static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);

static struct notifier_block ppc64_panic_block = {
	.notifier_call = ppc64_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};
  123. #ifdef CONFIG_SMP
  124. static int smt_enabled_cmdline;
  125. /* Look for ibm,smt-enabled OF option */
  126. static void check_smt_enabled(void)
  127. {
  128. struct device_node *dn;
  129. char *smt_option;
  130. /* Allow the command line to overrule the OF option */
  131. if (smt_enabled_cmdline)
  132. return;
  133. dn = of_find_node_by_path("/options");
  134. if (dn) {
  135. smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
  136. if (smt_option) {
  137. if (!strcmp(smt_option, "on"))
  138. smt_enabled_at_boot = 1;
  139. else if (!strcmp(smt_option, "off"))
  140. smt_enabled_at_boot = 0;
  141. }
  142. }
  143. }
  144. /* Look for smt-enabled= cmdline option */
  145. static int __init early_smt_enabled(char *p)
  146. {
  147. smt_enabled_cmdline = 1;
  148. if (!p)
  149. return 0;
  150. if (!strcmp(p, "on") || !strcmp(p, "1"))
  151. smt_enabled_at_boot = 1;
  152. else if (!strcmp(p, "off") || !strcmp(p, "0"))
  153. smt_enabled_at_boot = 0;
  154. return 0;
  155. }
  156. early_param("smt-enabled", early_smt_enabled);
  157. /**
  158. * setup_cpu_maps - initialize the following cpu maps:
  159. * cpu_possible_map
  160. * cpu_present_map
  161. * cpu_sibling_map
  162. *
  163. * Having the possible map set up early allows us to restrict allocations
  164. * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
  165. *
  166. * We do not initialize the online map here; cpus set their own bits in
  167. * cpu_online_map as they come up.
  168. *
  169. * This function is valid only for Open Firmware systems. finish_device_tree
  170. * must be called before using this.
  171. *
  172. * While we're here, we may as well set the "physical" cpu ids in the paca.
  173. */
static void __init setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;
	int swap_cpuid = 0;

	check_smt_enabled();

	/* Walk every "cpu" device-tree node; one node may describe several
	 * hardware threads (one per entry of ibm,ppc-interrupt-server#s). */
	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		u32 *intserv;
		int j, len = sizeof(u32), nthreads;

		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
				&len);
		if (!intserv)
			/* Fall back to "reg"; len keeps its initial
			 * sizeof(u32), so nthreads stays 1. */
			intserv = (u32 *)get_property(dn, "reg", NULL);

		nthreads = len / sizeof(u32);

		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			cpu_set(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, intserv[j]);

			/* Remember which logical id received the boot cpu's
			 * physical id so it can be swapped to logical 0. */
			if (intserv[j] == boot_cpuid_phys)
				swap_cpuid = cpu;
			cpu_set(cpu, cpu_possible_map);
			cpu++;
		}
	}

	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
	 * boot cpu is logical 0.
	 */
	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
		u32 tmp;
		tmp = get_hard_smp_processor_id(0);
		set_hard_smp_processor_id(0, boot_cpuid_phys);
		set_hard_smp_processor_id(swap_cpuid, tmp);
	}

	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		unsigned int *ireg;

		num_addr_cell = prom_n_addr_cells(dn);
		num_size_cell = prom_n_size_cells(dn);

		/* ibm,lrdr-capacity: the maximum processor count lives
		 * after the address and size cells of the property. */
		ireg = (unsigned int *)
			get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= 2;

		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			cpu_set(cpu, cpu_possible_map);
	out:
		of_node_put(dn);
	}

	/*
	 * Do the sibling map; assume only two threads per processor.
	 */
	for_each_cpu(cpu) {
		cpu_set(cpu, cpu_sibling_map[cpu]);
		if (cpu_has_feature(CPU_FTR_SMT))
			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
	}

	systemcfg->processorCount = num_present_cpus();
}
  248. #endif /* CONFIG_SMP */
/* Per-platform machdep_calls descriptors, defined by each platform
 * directory; early_setup() probes them in this order. */
extern struct machdep_calls pSeries_md;
extern struct machdep_calls pmac_md;
extern struct machdep_calls maple_md;
extern struct machdep_calls cell_md;
extern struct machdep_calls iseries_md;

/* Ultimately, stuff them in an elf section like initcalls... */
static struct machdep_calls __initdata *machines[] = {
#ifdef CONFIG_PPC_PSERIES
	&pSeries_md,
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_PMAC
	&pmac_md,
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC_MAPLE
	&maple_md,
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_CELL
	&cell_md,
#endif
#ifdef CONFIG_PPC_ISERIES
	&iseries_md,
#endif
	NULL
};
  273. /*
  274. * Early initialization entry point. This is called by head.S
  275. * with MMU translation disabled. We rely on the "feature" of
  276. * the CPU that ignores the top 2 bits of the address in real
  277. * mode so we can access kernel globals normally provided we
  278. * only toy with things in the RMO region. From here, we do
  279. * some early parsing of the device-tree to setup out LMB
  280. * data structures, and allocate & initialize the hash table
  281. * and segment tables so we can start running with translation
  282. * enabled.
  283. *
  284. * It is this function which will call the probe() callback of
  285. * the various platform types and copy the matching one to the
  286. * global ppc_md structure. Your platform can eventually do
  287. * some very early initializations from the probe() routine, but
  288. * this is not recommended, be very careful as, for example, the
  289. * device-tree is not accessible via normal means at this point.
  290. */
void __init early_setup(unsigned long dt_ptr)
{
	struct paca_struct *lpaca = get_paca();
	/* NOTE(review): static — lives in BSS rather than on the early
	 * stack; confirm whether that is required at this boot stage or
	 * merely historical. */
	static struct machdep_calls **mach;

	/*
	 * Enable early debugging if any specified (see top of
	 * this file)
	 */
	EARLY_DEBUG_INIT();

	DBG(" -> early_setup()\n");

	/*
	 * Fill the default DBG level (do we want to keep
	 * that old mechanism around forever ?)
	 */
	ppcdbg_initialize();

	/*
	 * Do early initializations using the flattened device
	 * tree, like retrieving the physical memory map or
	 * calculating/retrieving the hash table size
	 */
	early_init_devtree(__va(dt_ptr));

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type for platform %x...\n",
	    systemcfg->platform);

	for (mach = machines; *mach; mach++) {
		if ((*mach)->probe(systemcfg->platform))
			break;
	}
	/* What can we do if we didn't find ?  Hang forever; we cannot
	 * even print without a platform. */
	if (*mach == NULL) {
		DBG("No suitable machine found !\n");
		for (;;);
	}

	/* Copy the matching platform descriptor into the global ppc_md */
	ppc_md = **mach;

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize stab / SLB management (iSeries firmware owns the
	 * segment tables there, so skip it).
	 */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(lpaca->stab_real);

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory
	 */
	htab_initialize();

	DBG(" <- early_setup()\n");
}
  341. /*
  342. * Initialize some remaining members of the ppc64_caches and systemcfg structures
  343. * (at least until we get rid of them completely). This is mostly some
  344. * cache informations about the CPU that will be used by cache flush
  345. * routines and/or provided to userland
  346. */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if ( num_cpus == 1 ) {
			u32 *sizep, *lsizep;
			u32 size, lsize;
			const char *dc, *ic;

			/* Then read cache informations; Apple device-trees
			 * use different property names than the rest. */
			if (systemcfg->platform == PLATFORM_POWERMAC) {
				dc = "d-cache-block-size";
				ic = "i-cache-block-size";
			} else {
				dc = "d-cache-line-size";
				ic = "i-cache-line-size";
			}

			/* D-cache: fall back to the cputable line size when
			 * the device-tree lacks the property. */
			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = (u32 *)get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *) get_property(np, dc, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->dcache_size = ppc64_caches.dsize = size;
			systemcfg->dcache_line_size =
				ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			/* I-cache: same procedure. */
			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = (u32 *)get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *)get_property(np, ic, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->icache_size = ppc64_caches.isize = size;
			systemcfg->icache_line_size =
				ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;
	systemcfg->processor = mfspr(SPRN_PVR);

	DBG(" <- initialize_cache_info()\n");
}
  410. static void __init check_for_initrd(void)
  411. {
  412. #ifdef CONFIG_BLK_DEV_INITRD
  413. u64 *prop;
  414. DBG(" -> check_for_initrd()\n");
  415. if (of_chosen) {
  416. prop = (u64 *)get_property(of_chosen,
  417. "linux,initrd-start", NULL);
  418. if (prop != NULL) {
  419. initrd_start = (unsigned long)__va(*prop);
  420. prop = (u64 *)get_property(of_chosen,
  421. "linux,initrd-end", NULL);
  422. if (prop != NULL) {
  423. initrd_end = (unsigned long)__va(*prop);
  424. initrd_below_start_ok = 1;
  425. } else
  426. initrd_start = 0;
  427. }
  428. }
  429. /* If we were passed an initrd, set the ROOT_DEV properly if the values
  430. * look sensible. If not, clear initrd reference.
  431. */
  432. if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
  433. initrd_end > initrd_start)
  434. ROOT_DEV = Root_RAM0;
  435. else
  436. initrd_start = initrd_end = 0;
  437. if (initrd_start)
  438. printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
  439. DBG(" <- check_for_initrd()\n");
  440. #endif /* CONFIG_BLK_DEV_INITRD */
  441. }
  442. /*
  443. * Do some initial setup of the system. The parameters are those which
  444. * were passed in from the bootloader.
  445. */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree. Needs to be called before
	 * finish_device_tree() since the latter requires some of the
	 * information filled up here to properly parse the interrupt
	 * tree.
	 * It also sets up the cache line sizes which allows to call
	 * routines like flush_icache_range (used by the hash init
	 * later on).
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform specific early initializations, that includes
	 * setting up the hash table pointers. It also sets up some interrupt-mapping
	 * related options that will be used by finish_device_tree()
	 */
	ppc_md.init_early();

	/*
	 * "Finish" the device-tree, that is do the actual parsing of
	 * some of the properties like the interrupt map
	 */
	finish_device_tree();

#ifdef CONFIG_BOOTX_TEXT
	init_boot_display();
#endif

	/*
	 * Initialize xmon
	 */
#ifdef CONFIG_XMON_DEFAULT
	xmon_init(1);
#endif

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	parse_early_param();

#ifdef CONFIG_SMP
	/*
	 * iSeries has already initialized the cpu maps at this point.
	 */
	setup_cpu_maps();

	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	/* Boot-time banner with the main MMU/platform parameters */
	printk("Starting Linux PPC64 %s\n", system_utsname.version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
	printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
	/* NOTE(review): "0x%ld" prints a decimal value after a "0x"
	 * prefix — format looks wrong; confirm intent. */
	printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
	printk("systemcfg = 0x%p\n", systemcfg);
	printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
	printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
	printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
	printk("ppc64_caches.dcache_line_size = 0x%x\n",
	       ppc64_caches.dline_size);
	printk("ppc64_caches.icache_line_size = 0x%x\n",
	       ppc64_caches.iline_size);
	printk("htab_address = 0x%p\n", htab_address);
	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
	printk("-----------------------------------------------------\n");

	mm_init_ppc64();

	DBG(" <- setup_system()\n");
}
  530. static int ppc64_panic_event(struct notifier_block *this,
  531. unsigned long event, void *ptr)
  532. {
  533. ppc_md.panic((char *)ptr); /* May not return */
  534. return NOTIFY_DONE;
  535. }
#ifdef CONFIG_IRQSTACKS
/* Allocate the per-cpu soft- and hard-irq stacks from LMB memory
 * below 256MB so they are always reachable without an SLB miss. */
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/*
	 * interrupt stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them.
	 */
	for_each_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
		hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
	}
}
#else
#define irqstack_early_init()
#endif
  554. /*
  555. * Stack space used when we detect a bad kernel stack pointer, and
  556. * early in SMP boots before relocation is enabled.
  557. */
  558. static void __init emergency_stack_init(void)
  559. {
  560. unsigned long limit;
  561. unsigned int i;
  562. /*
  563. * Emergency stacks must be under 256MB, we cannot afford to take
  564. * SLB misses on them. The ABI also requires them to be 128-byte
  565. * aligned.
  566. *
  567. * Since we use these as temporary stacks during secondary CPU
  568. * bringup, we need to get at them in real mode. This means they
  569. * must also be within the RMO region.
  570. */
  571. limit = min(0x10000000UL, lmb.rmo_size);
  572. for_each_cpu(i)
  573. paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
  574. limit)) + PAGE_SIZE;
  575. }
  576. /*
  577. * Called from setup_arch to initialize the bitmap of available
  578. * syscalls in the systemcfg page
  579. */
  580. void __init setup_syscall_map(void)
  581. {
  582. unsigned int i, count64 = 0, count32 = 0;
  583. extern unsigned long *sys_call_table;
  584. extern unsigned long sys_ni_syscall;
  585. for (i = 0; i < __NR_syscalls; i++) {
  586. if (sys_call_table[i*2] != sys_ni_syscall) {
  587. count64++;
  588. systemcfg->syscall_map_64[i >> 5] |=
  589. 0x80000000UL >> (i & 0x1f);
  590. }
  591. if (sys_call_table[i*2+1] != sys_ni_syscall) {
  592. count32++;
  593. systemcfg->syscall_map_32[i >> 5] |=
  594. 0x80000000UL >> (i & 0x1f);
  595. }
  596. }
  597. printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
  598. count32, count64);
  599. }
/*
 * Called into from start_kernel, after lock_kernel has been called.
 * Initializes bootmem, which is used to manage page allocation until
 * mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	extern void do_init_bootmem(void);

	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* reboot on panic */
	panic_timeout = 180;

	/* Only hook the panic notifier if the platform provides a panic
	 * hook for ppc64_panic_event() to call. */
	if (ppc_md.panic)
		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	/* NOTE(review): klimit presumably marks the end of the loaded
	 * kernel image — confirm against head.S/prom init. */
	init_mm.brk = klimit;

	irqstack_early_init();
	emergency_stack_init();

	stabs_alloc();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

	/* initialize the syscall map in systemcfg */
	setup_syscall_map();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Platform-specific setup hook */
	ppc_md.setup_arch();

	/* Use the default idle loop if the platform hasn't provided one. */
	if (NULL == ppc_md.idle_loop) {
		ppc_md.idle_loop = default_idle;
		printk(KERN_INFO "Using default idle loop\n");
	}

	paging_init();
	ppc64_boot_msg(0x15, "Setup Done");
}
  645. /* ToDo: do something useful if ppc_md is not yet setup. */
  646. #define PPC64_LINUX_FUNCTION 0x0f000000
  647. #define PPC64_IPL_MESSAGE 0xc0000000
  648. #define PPC64_TERM_MESSAGE 0xb0000000
  649. static void ppc64_do_msg(unsigned int src, const char *msg)
  650. {
  651. if (ppc_md.progress) {
  652. char buf[128];
  653. sprintf(buf, "%08X\n", src);
  654. ppc_md.progress(buf, 0);
  655. snprintf(buf, 128, "%s", msg);
  656. ppc_md.progress(buf, 0);
  657. }
  658. }
  659. /* Print a boot progress message. */
  660. void ppc64_boot_msg(unsigned int src, const char *msg)
  661. {
  662. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
  663. printk("[boot]%04x %s\n", src, msg);
  664. }
  665. /* Print a termination message (print only -- does not stop the kernel) */
  666. void ppc64_terminate_msg(unsigned int src, const char *msg)
  667. {
  668. ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
  669. printk("[terminate]%04x %s\n", src, msg);
  670. }
  671. #ifndef CONFIG_PPC_ISERIES
  672. /*
  673. * This function can be used by platforms to "find" legacy serial ports.
  674. * It works for "serial" nodes under an "isa" node, and will try to
  675. * respect the "ibm,aix-loc" property if any. It works with up to 8
  676. * ports.
  677. */
  678. #define MAX_LEGACY_SERIAL_PORTS 8
  679. static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
  680. static unsigned int old_serial_count;
void __init generic_find_legacy_serial_ports(u64 *physport,
		unsigned int *default_speed)
{
	struct device_node *np;
	u32 *sizeprop;

	/* Layout of one ISA "reg" property entry */
	struct isa_reg_property {
		u32 space;
		u32 address;
		u32 size;
	};
	/* Layout of one PCI range/address entry */
	struct pci_reg_property {
		struct pci_address addr;
		u32 size_hi;
		u32 size_lo;
	};

	DBG(" -> generic_find_legacy_serial_port()\n");

	*physport = 0;
	if (default_speed)
		*default_speed = 0;

	np = of_find_node_by_path("/");
	if (!np)
		return;

	/* First fill our array */
	for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
		struct device_node *isa, *pci;
		struct isa_reg_property *reg;
		unsigned long phys_size, addr_size, io_base;
		u32 *rangesp;
		u32 *interrupts, *clk, *spd;
		char *typep;
		int index, rlen, rentsize;

		/* Ok, first check if it's under an "isa" parent */
		isa = of_get_parent(np);
		if (!isa || strcmp(isa->name, "isa")) {
			DBG("%s: no isa parent found\n", np->full_name);
			continue;
		}

		/* Now look for an "ibm,aix-loc" property that gives us ordering
		 * if any...
		 */
		typep = (char *)get_property(np, "ibm,aix-loc", NULL);

		/* Get the ISA port number */
		reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
		if (reg == NULL)
			goto next_port;

		/* We assume the interrupt number isn't translated ... */
		interrupts = (u32 *)get_property(np, "interrupts", NULL);

		/* get clock freq. if present */
		clk = (u32 *)get_property(np, "clock-frequency", NULL);

		/* get default speed if present */
		spd = (u32 *)get_property(np, "current-speed", NULL);

		/* Default to locate at end of array */
		index = old_serial_count; /* end of the array by default */

		/* If we have a location index, then use it */
		if (typep && *typep == 'S') {
			/* "S<n>" location codes are 1-based */
			index = simple_strtol(typep+1, NULL, 0) - 1;

			/* if index is out of range, use end of array instead */
			if (index >= MAX_LEGACY_SERIAL_PORTS)
				index = old_serial_count;

			/* if our index is still out of range, that means the
			 * array is full, we could scan for a free slot but that
			 * makes little sense to bother, just skip the port
			 */
			if (index >= MAX_LEGACY_SERIAL_PORTS)
				goto next_port;
			if (index >= old_serial_count)
				old_serial_count = index + 1;

			/* Check if there is a port who already claimed our slot */
			if (serial_ports[index].iobase != 0) {
				/* if we still have some room, move it, else override */
				if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
					DBG("Moved legacy port %d -> %d\n", index,
					    old_serial_count);
					serial_ports[old_serial_count++] =
						serial_ports[index];
				} else {
					DBG("Replacing legacy port %d\n", index);
				}
			}
		}
		if (index >= MAX_LEGACY_SERIAL_PORTS)
			goto next_port;
		if (index >= old_serial_count)
			old_serial_count = index + 1;

		/* Now fill the entry */
		memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
		serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
		serial_ports[index].iobase = reg->address;
		serial_ports[index].irq = interrupts ? interrupts[0] : 0;
		serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;

		DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
		    index,
		    serial_ports[index].iobase,
		    serial_ports[index].irq,
		    serial_ports[index].uartclk);

		/* Get phys address of IO reg for port 1 */
		if (index != 0)
			goto next_port;

		pci = of_get_parent(isa);
		if (!pci) {
			DBG("%s: no pci parent found\n", np->full_name);
			goto next_port;
		}

		rangesp = (u32 *)get_property(pci, "ranges", &rlen);
		if (rangesp == NULL) {
			of_node_put(pci);
			goto next_port;
		}
		rlen /= 4;	/* rlen is in bytes; convert to u32 cells */

		/* we need the #size-cells of the PCI bridge node itself */
		phys_size = 1;
		sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
		if (sizeprop != NULL)
			phys_size = *sizeprop;

		/* we need the parent #addr-cells */
		addr_size = prom_n_addr_cells(pci);
		rentsize = 3 + addr_size + phys_size;
		io_base = 0;
		/* Scan the ranges for the I/O-space entry and compute the
		 * CPU physical base of the ISA I/O window. */
		for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
			if (((rangesp[0] >> 24) & 0x3) != 1)
				continue; /* not IO space */
			io_base = rangesp[3];
			if (addr_size == 2)
				io_base = (io_base << 32) | rangesp[4];
		}
		if (io_base != 0) {
			*physport = io_base + reg->address;
			if (default_speed && spd)
				*default_speed = *spd;
		}
		of_node_put(pci);
	next_port:
		of_node_put(isa);
	}

	DBG(" <- generic_find_legacy_serial_port()\n");
}
/* Hand the legacy ports discovered above to the generic 8250 driver
 * as platform data. */
static struct platform_device serial_device = {
	.name = "serial8250",
	.id = PLAT8250_DEV_PLATFORM,
	.dev = {
		.platform_data = serial_ports,
	},
};

/* Register the serial platform device at arch_initcall time. */
static int __init serial_dev_init(void)
{
	return platform_device_register(&serial_device);
}
arch_initcall(serial_dev_init);

#endif /* CONFIG_PPC_ISERIES */
  830. int check_legacy_ioport(unsigned long base_port)
  831. {
  832. if (ppc_md.check_legacy_ioport == NULL)
  833. return 0;
  834. return ppc_md.check_legacy_ioport(base_port);
  835. }
  836. EXPORT_SYMBOL(check_legacy_ioport);
#ifdef CONFIG_XMON
/* Handle the "xmon=" kernel command line option:
 *   xmon=on     enable xmon
 *   xmon=off    disable xmon
 *   xmon=early  enable xmon and drop into the debugger immediately
 * A bare "xmon" (p == NULL) falls through and behaves like "early".
 */
static int __init early_xmon(char *p)
{
	/* ensure xmon is enabled */
	if (p) {
		if (strncmp(p, "on", 2) == 0)
			xmon_init(1);
		if (strncmp(p, "off", 3) == 0)
			xmon_init(0);
		/* anything other than "early" is fully handled above */
		if (strncmp(p, "early", 5) != 0)
			return 0;
	}
	/* "early" or no argument: enable and enter the debugger now */
	xmon_init(1);
	debugger(NULL);

	return 0;
}
early_param("xmon", early_xmon);
#endif
  855. void cpu_die(void)
  856. {
  857. if (ppc_md.cpu_die)
  858. ppc_md.cpu_die();
  859. }