/*
 * setup_64.c
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/ide.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/ppcdebug.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/iseries/it_lp_naca.h>
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/xmon.h>
#include <asm/udbg.h>	/* udbg_printf, udbg_putc, register_early_udbg_console */

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * Here are some early debugging facilities. You can enable one of them,
 * but your kernel will then only boot on the matching platform.
 */

/* This one is for use on LPAR machines that support an HVC console
 * on vterm 0
 */
extern void udbg_init_debug_lpar(void);

/* This one is for use on Apple G5 machines
 */
extern void udbg_init_pmac_realmode(void);

/* That's RTAS panel debug */
extern void call_rtas_display_status_delay(unsigned char c);

/* Here's maple real mode debug */
extern void udbg_init_maple_realmode(void);

#define EARLY_DEBUG_INIT() do {} while(0)

#if 0
#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
#define EARLY_DEBUG_INIT() \
	do { udbg_putc = call_rtas_display_status_delay; } while(0)
#endif

/* extern void *stab; */
extern unsigned long klimit;

extern void mm_init_ppc64(void);
extern void stab_initialize(unsigned long stab);
extern void htab_initialize(void);
extern void early_init_devtree(void *flat_dt);
extern void unflatten_device_tree(void);

int have_of = 1;
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
dev_t boot_dev;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches;
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);

#ifdef CONFIG_MAGIC_SYSRQ
unsigned long SYSRQ_KEY;
#endif /* CONFIG_MAGIC_SYSRQ */

static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);

static struct notifier_block ppc64_panic_block = {
	.notifier_call = ppc64_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

#ifdef CONFIG_SMP

static int smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
	struct device_node *dn;
	char *smt_option;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline)
		return;

	dn = of_find_node_by_path("/options");

	if (dn) {
		smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);

		if (smt_option) {
			if (!strcmp(smt_option, "on"))
				smt_enabled_at_boot = 1;
			else if (!strcmp(smt_option, "off"))
				smt_enabled_at_boot = 0;
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = 1;

	if (!p)
		return 0;

	if (!strcmp(p, "on") || !strcmp(p, "1"))
		smt_enabled_at_boot = 1;
	else if (!strcmp(p, "off") || !strcmp(p, "0"))
		smt_enabled_at_boot = 0;

	return 0;
}
early_param("smt-enabled", early_smt_enabled);

/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 */
static void __init setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;
	int swap_cpuid = 0;

	check_smt_enabled();

	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		u32 *intserv;
		int j, len = sizeof(u32), nthreads;

		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
					      &len);
		if (!intserv)
			intserv = (u32 *)get_property(dn, "reg", NULL);

		nthreads = len / sizeof(u32);

		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			cpu_set(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, intserv[j]);

			if (intserv[j] == boot_cpuid_phys)
				swap_cpuid = cpu;
			cpu_set(cpu, cpu_possible_map);
			cpu++;
		}
	}

	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
	 * boot cpu is logical 0.
	 */
	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
		u32 tmp;

		tmp = get_hard_smp_processor_id(0);
		set_hard_smp_processor_id(0, boot_cpuid_phys);
		set_hard_smp_processor_id(swap_cpuid, tmp);
	}

	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		unsigned int *ireg;

		num_addr_cell = prom_n_addr_cells(dn);
		num_size_cell = prom_n_size_cells(dn);

		ireg = (unsigned int *)
			get_property(dn, "ibm,lrdr-capacity", NULL);
		if (!ireg)
			goto out;

		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= 2;

		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			cpu_set(cpu, cpu_possible_map);
	out:
		of_node_put(dn);
	}

	/*
	 * Do the sibling map; assume only two threads per processor.
	 */
	for_each_cpu(cpu) {
		cpu_set(cpu, cpu_sibling_map[cpu]);
		if (cpu_has_feature(CPU_FTR_SMT))
			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
	}

	systemcfg->processorCount = num_present_cpus();
}
#endif /* CONFIG_SMP */

extern struct machdep_calls pSeries_md;
extern struct machdep_calls pmac_md;
extern struct machdep_calls maple_md;
extern struct machdep_calls cell_md;
extern struct machdep_calls iseries_md;

/* Ultimately, stuff them in an elf section like initcalls... */
static struct machdep_calls __initdata *machines[] = {
#ifdef CONFIG_PPC_PSERIES
	&pSeries_md,
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_PMAC
	&pmac_md,
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC_MAPLE
	&maple_md,
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_CELL
	&cell_md,
#endif
#ifdef CONFIG_PPC_ISERIES
	&iseries_md,
#endif
	NULL
};

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our LMB
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	struct paca_struct *lpaca = get_paca();
	static struct machdep_calls **mach;

	/*
	 * Enable early debugging if any specified (see top of
	 * this file)
	 */
	EARLY_DEBUG_INIT();

	DBG(" -> early_setup()\n");

	/*
	 * Fill the default DBG level (do we want to keep
	 * that old mechanism around forever?)
	 */
	ppcdbg_initialize();

	/*
	 * Do early initializations using the flattened device
	 * tree, like retrieving the physical memory map or
	 * calculating/retrieving the hash table size
	 */
	early_init_devtree(__va(dt_ptr));

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type for platform %x...\n",
	    systemcfg->platform);

	for (mach = machines; *mach; mach++) {
		if ((*mach)->probe(systemcfg->platform))
			break;
	}
	/* What can we do if we didn't find one? */
	if (*mach == NULL) {
		DBG("No suitable machine found !\n");
		for (;;);
	}
	ppc_md = **mach;

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize stab / SLB management
	 */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(lpaca->stab_real);

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory
	 */
	htab_initialize();

	DBG(" <- early_setup()\n");
}
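
/*
 * Release the secondary CPUs from the common early-boot spinloop
 * (__secondary_hold_spinloop) so that each one can move on to spinning
 * on its own paca spinloop. Called from setup_system() once physical
 * CPU ids can be mapped to logical ones.
 */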
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
{
	extern unsigned long __secondary_hold_spinloop;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 * This is useless but harmless on iSeries; secondaries are already
	 * waiting on their paca spinloops.
	 */
	__secondary_hold_spinloop = 1;
	mb();

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by
 * cache flush routines and/or provided to userland.
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if ( num_cpus == 1 ) {
			u32 *sizep, *lsizep;
			u32 size, lsize;
			const char *dc, *ic;

			/* Then read cache information */
			if (systemcfg->platform == PLATFORM_POWERMAC) {
				dc = "d-cache-block-size";
				ic = "i-cache-block-size";
			} else {
				dc = "d-cache-line-size";
				ic = "i-cache-line-size";
			}

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = (u32 *)get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *) get_property(np, dc, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->dcache_size = ppc64_caches.dsize = size;
			systemcfg->dcache_line_size =
				ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = (u32 *)get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = (u32 *)get_property(np, ic, NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == 0 || lsizep == 0)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			systemcfg->icache_size = ppc64_caches.isize = size;
			systemcfg->icache_line_size =
				ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;
	systemcfg->processor = mfspr(SPRN_PVR);

	DBG(" <- initialize_cache_info()\n");
}
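
/*
 * Look for a "linux,initrd-start" / "linux,initrd-end" pair under /chosen
 * and, if the range looks sensible, point ROOT_DEV at the RAM disk.
 */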
static void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	u64 *prop;

	DBG(" -> check_for_initrd()\n");

	if (of_chosen) {
		prop = (u64 *)get_property(of_chosen,
				"linux,initrd-start", NULL);
		if (prop != NULL) {
			initrd_start = (unsigned long)__va(*prop);
			prop = (u64 *)get_property(of_chosen,
					"linux,initrd-end", NULL);
			if (prop != NULL) {
				initrd_end = (unsigned long)__va(*prop);
				initrd_below_start_ok = 1;
			} else
				initrd_start = 0;
		}
	}

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree. This needs to be called before
	 * finish_device_tree() since the latter requires some of the
	 * information filled in here to properly parse the interrupt tree.
	 * It also sets up the cache line sizes, which allows us to call
	 * routines like flush_icache_range (used by the hash init
	 * later on).
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform-specific early initializations; that includes
	 * setting up the hash table pointers. It also sets up some
	 * interrupt-mapping related options that will be used by
	 * finish_device_tree()
	 */
	ppc_md.init_early();

	/*
	 * "Finish" the device-tree, that is do the actual parsing of
	 * some of the properties like the interrupt map
	 */
	finish_device_tree();

#ifdef CONFIG_BOOTX_TEXT
	init_boot_display();
#endif

	/*
	 * Initialize xmon
	 */
#ifdef CONFIG_XMON_DEFAULT
	xmon_init(1);
#endif

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	parse_early_param();

#ifdef CONFIG_SMP
	/*
	 * iSeries has already initialized the cpu maps at this point.
	 */
	setup_cpu_maps();

	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	printk("Starting Linux PPC64 %s\n", system_utsname.version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size                = 0x%lx\n", ppc64_pft_size);
	printk("ppc64_debug_switch            = 0x%lx\n", ppc64_debug_switch);
	printk("ppc64_interrupt_controller    = 0x%ld\n", ppc64_interrupt_controller);
	printk("systemcfg                     = 0x%p\n", systemcfg);
	printk("systemcfg->platform           = 0x%x\n", systemcfg->platform);
	printk("systemcfg->processorCount     = 0x%lx\n", systemcfg->processorCount);
	printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
	printk("ppc64_caches.dcache_line_size = 0x%x\n",
	       ppc64_caches.dline_size);
	printk("ppc64_caches.icache_line_size = 0x%x\n",
	       ppc64_caches.iline_size);
	printk("htab_address                  = 0x%p\n", htab_address);
	printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
	printk("-----------------------------------------------------\n");

	mm_init_ppc64();

	DBG(" <- setup_system()\n");
}
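
/*
 * Panic notifier callback: hand the panic message over to the platform's
 * panic hook. Registered in setup_arch() when ppc_md.panic is set.
 */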
static int ppc64_panic_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	ppc_md.panic((char *)ptr);  /* May not return */
	return NOTIFY_DONE;
}

#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/*
	 * interrupt stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them.
	 */
	for_each_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
		hardirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
	}
}
#else
#define irqstack_early_init()
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled.
 */
static void __init emergency_stack_init(void)
{
	unsigned long limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(0x10000000UL, lmb.rmo_size);

	for_each_cpu(i)
		paca[i].emergency_sp =
			__va(lmb_alloc_base(PAGE_SIZE, 128, limit)) + PAGE_SIZE;
}
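
/*
 * Note on the layout assumed below: the ppc64 sys_call_table carries two
 * entries per syscall number, with the 64-bit handler at index i*2 and
 * the 32-bit (compat) handler at i*2+1.
 */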
/*
 * Called from setup_arch to initialize the bitmap of available
 * syscalls in the systemcfg page
 */
void __init setup_syscall_map(void)
{
	unsigned int i, count64 = 0, count32 = 0;
	extern unsigned long *sys_call_table;
	extern unsigned long sys_ni_syscall;

	for (i = 0; i < __NR_syscalls; i++) {
		if (sys_call_table[i*2] != sys_ni_syscall) {
			count64++;
			systemcfg->syscall_map_64[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		}
		if (sys_call_table[i*2+1] != sys_ni_syscall) {
			count32++;
			systemcfg->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		}
	}
	printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
	       count32, count64);
}

/*
 * Called from start_kernel, after lock_kernel has been called.
 * Initializes bootmem, which is used to manage page allocation until
 * mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	extern void do_init_bootmem(void);

	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* reboot on panic */
	panic_timeout = 180;

	if (ppc_md.panic)
		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	irqstack_early_init();
	emergency_stack_init();

	stabs_alloc();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

	/* initialize the syscall map in systemcfg */
	setup_syscall_map();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	ppc_md.setup_arch();

	/* Use the default idle loop if the platform hasn't provided one. */
	if (NULL == ppc_md.idle_loop) {
		ppc_md.idle_loop = default_idle;
		printk(KERN_INFO "Using default idle loop\n");
	}

	paging_init();
	ppc64_boot_msg(0x15, "Setup Done");
}
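
/*
 * Boot progress / termination messages: the PPC64_* codes below are OR'd
 * with the caller's value and passed to ppc_md.progress() as a hex string,
 * followed by the text of the message.
 */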
/* ToDo: do something useful if ppc_md is not yet setup. */
#define PPC64_LINUX_FUNCTION	0x0f000000
#define PPC64_IPL_MESSAGE	0xc0000000
#define PPC64_TERM_MESSAGE	0xb0000000

static void ppc64_do_msg(unsigned int src, const char *msg)
{
	if (ppc_md.progress) {
		char buf[128];

		sprintf(buf, "%08X\n", src);
		ppc_md.progress(buf, 0);
		snprintf(buf, 128, "%s", msg);
		ppc_md.progress(buf, 0);
	}
}

/* Print a boot progress message. */
void ppc64_boot_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
	printk("[boot]%04x %s\n", src, msg);
}

/* Print a termination message (print only -- does not stop the kernel) */
void ppc64_terminate_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
	printk("[terminate]%04x %s\n", src, msg);
}

#ifndef CONFIG_PPC_ISERIES
/*
 * This function can be used by platforms to "find" legacy serial ports.
 * It works for "serial" nodes under an "isa" node, and will try to
 * respect the "ibm,aix-loc" property if any. It works with up to 8
 * ports.
 */

#define MAX_LEGACY_SERIAL_PORTS	8
static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
static unsigned int old_serial_count;

void __init generic_find_legacy_serial_ports(u64 *physport,
		unsigned int *default_speed)
{
	struct device_node *np;
	u32 *sizeprop;

	struct isa_reg_property {
		u32 space;
		u32 address;
		u32 size;
	};
	struct pci_reg_property {
		struct pci_address addr;
		u32 size_hi;
		u32 size_lo;
	};

	DBG(" -> generic_find_legacy_serial_port()\n");

	*physport = 0;
	if (default_speed)
		*default_speed = 0;

	np = of_find_node_by_path("/");
	if (!np)
		return;

	/* First fill our array */
	for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
		struct device_node *isa, *pci;
		struct isa_reg_property *reg;
		unsigned long phys_size, addr_size, io_base;
		u32 *rangesp;
		u32 *interrupts, *clk, *spd;
		char *typep;
		int index, rlen, rentsize;

		/* Ok, first check if it's under an "isa" parent */
		isa = of_get_parent(np);
		if (!isa || strcmp(isa->name, "isa")) {
			DBG("%s: no isa parent found\n", np->full_name);
			continue;
		}

		/* Now look for an "ibm,aix-loc" property that gives us
		 * the ordering, if any...
		 */
		typep = (char *)get_property(np, "ibm,aix-loc", NULL);

		/* Get the ISA port number */
		reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
		if (reg == NULL)
			goto next_port;

		/* We assume the interrupt number isn't translated ... */
		interrupts = (u32 *)get_property(np, "interrupts", NULL);

		/* get clock freq. if present */
		clk = (u32 *)get_property(np, "clock-frequency", NULL);

		/* get default speed if present */
		spd = (u32 *)get_property(np, "current-speed", NULL);

		/* Default to locate at end of array */
		index = old_serial_count; /* end of the array by default */

		/* If we have a location index, then use it */
		if (typep && *typep == 'S') {
			index = simple_strtol(typep+1, NULL, 0) - 1;

			/* if index is out of range, use end of array instead */
			if (index >= MAX_LEGACY_SERIAL_PORTS)
				index = old_serial_count;

			/* if our index is still out of range, that means the
			 * array is full; we could scan for a free slot, but it
			 * makes little sense to bother, so just skip the port
			 */
			if (index >= MAX_LEGACY_SERIAL_PORTS)
				goto next_port;
			if (index >= old_serial_count)
				old_serial_count = index + 1;

			/* Check if a port has already claimed our slot */
			if (serial_ports[index].iobase != 0) {

				/* if we still have some room, move it, else override */
				if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
					DBG("Moved legacy port %d -> %d\n", index,
					    old_serial_count);
					serial_ports[old_serial_count++] =
						serial_ports[index];
				} else {
					DBG("Replacing legacy port %d\n", index);
				}
			}
		}
		if (index >= MAX_LEGACY_SERIAL_PORTS)
			goto next_port;
		if (index >= old_serial_count)
			old_serial_count = index + 1;

		/* Now fill the entry */
		memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
		serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
		serial_ports[index].iobase = reg->address;
		serial_ports[index].irq = interrupts ? interrupts[0] : 0;
		serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;

		DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
		    index,
		    serial_ports[index].iobase,
		    serial_ports[index].irq,
		    serial_ports[index].uartclk);

		/* Get phys address of IO reg for port 1 */
		if (index != 0)
			goto next_port;

		pci = of_get_parent(isa);
		if (!pci) {
			DBG("%s: no pci parent found\n", np->full_name);
			goto next_port;
		}

		rangesp = (u32 *)get_property(pci, "ranges", &rlen);
		if (rangesp == NULL) {
			of_node_put(pci);
			goto next_port;
		}
		rlen /= 4;

		/* we need the #size-cells of the PCI bridge node itself */
		phys_size = 1;
		sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
		if (sizeprop != NULL)
			phys_size = *sizeprop;

		/* we need the parent #addr-cells */
		addr_size = prom_n_addr_cells(pci);

		rentsize = 3 + addr_size + phys_size;
		io_base = 0;

		for (; rlen >= rentsize; rlen -= rentsize, rangesp += rentsize) {
			if (((rangesp[0] >> 24) & 0x3) != 1)
				continue; /* not IO space */
			io_base = rangesp[3];
			if (addr_size == 2)
				io_base = (io_base << 32) | rangesp[4];
		}
		if (io_base != 0) {
			*physport = io_base + reg->address;
			if (default_speed && spd)
				*default_speed = *spd;
		}
		of_node_put(pci);
	next_port:
		of_node_put(isa);
	}

	DBG(" <- generic_find_legacy_serial_port()\n");
}
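
/*
 * Hand the legacy ports found above to the generic 8250 driver by
 * registering them as platform data on a "serial8250" device.
 */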
static struct platform_device serial_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = serial_ports,
	},
};

static int __init serial_dev_init(void)
{
	return platform_device_register(&serial_device);
}
arch_initcall(serial_dev_init);

#endif /* CONFIG_PPC_ISERIES */
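
/*
 * Ask the platform whether a legacy ISA I/O port is usable; platforms
 * that do not provide a hook accept everything.
 */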
int check_legacy_ioport(unsigned long base_port)
{
	if (ppc_md.check_legacy_ioport == NULL)
		return 0;
	return ppc_md.check_legacy_ioport(base_port);
}
EXPORT_SYMBOL(check_legacy_ioport);

#ifdef CONFIG_XMON
static int __init early_xmon(char *p)
{
	/* ensure xmon is enabled */
	if (p) {
		if (strncmp(p, "on", 2) == 0)
			xmon_init(1);
		if (strncmp(p, "off", 3) == 0)
			xmon_init(0);
		if (strncmp(p, "early", 5) != 0)
			return 0;
	}
	xmon_init(1);
	debugger(NULL);

	return 0;
}
early_param("xmon", early_xmon);
#endif
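
/* Called when a CPU is taken down; defer to the platform's cpu_die hook, if any. */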
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();
}