  1. /*
  2. * arch/s390/kernel/setup.c
  3. *
  4. * S390 version
  5. * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  6. * Author(s): Hartmut Penner (hp@de.ibm.com),
  7. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  8. *
  9. * Derived from "arch/i386/kernel/setup.c"
  10. * Copyright (C) 1995, Linus Torvalds
  11. */
  12. /*
  13. * This file handles the architecture-dependent parts of initialization
  14. */
  15. #include <linux/errno.h>
  16. #include <linux/module.h>
  17. #include <linux/sched.h>
  18. #include <linux/kernel.h>
  19. #include <linux/mm.h>
  20. #include <linux/stddef.h>
  21. #include <linux/unistd.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/slab.h>
  24. #include <linux/user.h>
  25. #include <linux/a.out.h>
  26. #include <linux/tty.h>
  27. #include <linux/ioport.h>
  28. #include <linux/delay.h>
  29. #include <linux/init.h>
  30. #include <linux/initrd.h>
  31. #include <linux/bootmem.h>
  32. #include <linux/root_dev.h>
  33. #include <linux/console.h>
  34. #include <linux/seq_file.h>
  35. #include <linux/kernel_stat.h>
  36. #include <linux/device.h>
  37. #include <linux/notifier.h>
  38. #include <linux/pfn.h>
  39. #include <linux/ctype.h>
  40. #include <linux/reboot.h>
  41. #include <asm/uaccess.h>
  42. #include <asm/system.h>
  43. #include <asm/smp.h>
  44. #include <asm/mmu_context.h>
  45. #include <asm/cpcmd.h>
  46. #include <asm/lowcore.h>
  47. #include <asm/irq.h>
  48. #include <asm/page.h>
  49. #include <asm/ptrace.h>
  50. #include <asm/sections.h>
  51. #include <asm/ebcdic.h>
  52. #include <asm/compat.h>
/*
 * Default PSW bit templates.  The kernel runs with DAT enabled in the
 * primary address space; user PSWs additionally enable I/O and external
 * interrupts and set the problem-state bit.  Both may be rebuilt later
 * by set_amode_and_uaccess() when address spaces are switched.
 */
long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
			PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
		      PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
		      PSW_MASK_PSTATE | PSW_DEFAULT_KEY);

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL_GPL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;		/* set via conmode= */
unsigned int console_devno = -1;	/* set via condev= or QUERY CONSOLE */
unsigned int console_irq = -1;
unsigned long machine_flags = 0;	/* filled by detect_machine_type() */

/* Physical memory layout as detected at boot (consumed by setup_memory*) */
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;	/* limit set via mem= */

/*
 * This is set up by the setup-routine at boot-time
 * for S390 need to find out, what we have to setup
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/* iomem resources describing the kernel image, registered below the
 * "System RAM" resources in setup_resources() */
static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init (void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	/* Borrow init_mm as the active mm; at this point the cpu must
	 * not own a user mm of its own. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}
/*
 * VM halt and poweroff setup routines
 */
/* CP commands issued on halt/power-off/panic when running under z/VM;
 * filled in by the vmhalt=/vmpoff=/vmpanic= parameters below. */
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";
static char vmpanic_cmd[128] = "";
  115. static inline void strncpy_skip_quote(char *dst, char *src, int n)
  116. {
  117. int sx, dx;
  118. dx = 0;
  119. for (sx = 0; src[sx] != 0; sx++) {
  120. if (src[sx] == '"') continue;
  121. dst[dx++] = src[sx];
  122. if (dx >= n) break;
  123. }
  124. }
  125. static int __init vmhalt_setup(char *str)
  126. {
  127. strncpy_skip_quote(vmhalt_cmd, str, 127);
  128. vmhalt_cmd[127] = 0;
  129. return 1;
  130. }
  131. __setup("vmhalt=", vmhalt_setup);
  132. static int __init vmpoff_setup(char *str)
  133. {
  134. strncpy_skip_quote(vmpoff_cmd, str, 127);
  135. vmpoff_cmd[127] = 0;
  136. return 1;
  137. }
  138. __setup("vmpoff=", vmpoff_setup);
/* Panic notifier: issue the vmpanic= CP command (if one was given)
 * when the kernel panics while running under z/VM. */
static int vmpanic_notify(struct notifier_block *self, unsigned long event,
			  void *data)
{
	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
		cpcmd(vmpanic_cmd, NULL, 0, NULL);

	return NOTIFY_OK;
}

#define PANIC_PRI_VMPANIC	0

static struct notifier_block vmpanic_nb = {
	.notifier_call = vmpanic_notify,
	.priority = PANIC_PRI_VMPANIC
};
  151. static int __init vmpanic_setup(char *str)
  152. {
  153. static int register_done __initdata = 0;
  154. strncpy_skip_quote(vmpanic_cmd, str, 127);
  155. vmpanic_cmd[127] = 0;
  156. if (!register_done) {
  157. register_done = 1;
  158. atomic_notifier_chain_register(&panic_notifier_list,
  159. &vmpanic_nb);
  160. }
  161. return 1;
  162. }
  163. __setup("vmpanic=", vmpanic_setup);
  164. /*
  165. * condev= and conmode= setup parameter.
  166. */
  167. static int __init condev_setup(char *str)
  168. {
  169. int vdev;
  170. vdev = simple_strtoul(str, &str, 0);
  171. if (vdev >= 0 && vdev < 65536) {
  172. console_devno = vdev;
  173. console_irq = -1;
  174. }
  175. return 1;
  176. }
  177. __setup("condev=", condev_setup);
/* "conmode=<name>" - explicitly select the console driver.  Only the
 * drivers compiled into this kernel are honored.  Note: strncmp with a
 * length that includes the NUL byte acts as an exact-match compare. */
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	return 1;
}
__setup("conmode=", conmode_setup);
/*
 * Pick a default console mode when no conmode= parameter was given.
 * Under z/VM the console device and terminal mode are queried via CP
 * commands; on a P/390 a 3215/3270 console is assumed; everywhere else
 * SCLP is used.
 */
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		/* device number follows the "CONS " prefix, in hex */
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		/* NOTE(review): assumes CP always reports "SUBCHANNEL ="
		 * for the console; ptr is dereferenced below without a
		 * NULL check - confirm against the CP response format. */
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		/* "CONMODE 3270" / "CONMODE 3215": prefer the matching
		 * driver, fall back to whatever is compiled in */
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
		SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
		SET_CONSOLE_3270;
#endif
	} else {
#if defined(CONFIG_SCLP_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}
/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
#define DEFSYS_CMD_SIZE		96
#define SAVESYS_CMD_SIZE	32

extern int _eshared;	/* linker symbol: end of the shared kernel text */
/* name of the named saved system, parsed from SAVESYS= */
char kernel_nss_name[NSS_NAME_SIZE + 1];
  256. #ifdef CONFIG_SHARED_KERNEL
  257. static __init void create_kernel_nss(void)
  258. {
  259. unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
  260. #ifdef CONFIG_BLK_DEV_INITRD
  261. unsigned int sinitrd_pfn, einitrd_pfn;
  262. #endif
  263. int response;
  264. char *savesys_ptr;
  265. char upper_command_line[COMMAND_LINE_SIZE];
  266. char defsys_cmd[DEFSYS_CMD_SIZE];
  267. char savesys_cmd[SAVESYS_CMD_SIZE];
  268. /* Do nothing if we are not running under VM */
  269. if (!MACHINE_IS_VM)
  270. return;
  271. /* Convert COMMAND_LINE to upper case */
  272. for (i = 0; i < strlen(COMMAND_LINE); i++)
  273. upper_command_line[i] = toupper(COMMAND_LINE[i]);
  274. savesys_ptr = strstr(upper_command_line, "SAVESYS=");
  275. if (!savesys_ptr)
  276. return;
  277. savesys_ptr += 8; /* Point to the beginning of the NSS name */
  278. for (i = 0; i < NSS_NAME_SIZE; i++) {
  279. if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
  280. break;
  281. kernel_nss_name[i] = savesys_ptr[i];
  282. }
  283. stext_pfn = PFN_DOWN(__pa(&_stext));
  284. eshared_pfn = PFN_DOWN(__pa(&_eshared));
  285. end_pfn = PFN_UP(__pa(&_end));
  286. min_size = end_pfn << 2;
  287. sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
  288. kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
  289. eshared_pfn, end_pfn);
  290. #ifdef CONFIG_BLK_DEV_INITRD
  291. if (INITRD_START && INITRD_SIZE) {
  292. sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
  293. einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
  294. min_size = einitrd_pfn << 2;
  295. sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
  296. sinitrd_pfn, einitrd_pfn);
  297. }
  298. #endif
  299. sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
  300. sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
  301. kernel_nss_name, kernel_nss_name);
  302. __cpcmd(defsys_cmd, NULL, 0, &response);
  303. if (response != 0)
  304. return;
  305. __cpcmd(savesys_cmd, NULL, 0, &response);
  306. if (response != strlen(savesys_cmd))
  307. return;
  308. ipl_flags = IPL_NSS_VALID;
  309. }
  310. #else /* CONFIG_SHARED_KERNEL */
  311. static inline void create_kernel_nss(void) { }
  312. #endif /* CONFIG_SHARED_KERNEL */
/*
 * Clear bss memory
 */
static __init void clear_bss_section(void)
{
	/* __bss_start and _end are linker-script symbols (asm/sections.h) */
	memset(__bss_start, 0, _end - __bss_start);
}
  320. /*
  321. * Initialize storage key for kernel pages
  322. */
  323. static __init void init_kernel_storage_key(void)
  324. {
  325. unsigned long end_pfn, init_pfn;
  326. end_pfn = PFN_UP(__pa(&_end));
  327. for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
  328. page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
  329. }
/* Probe what we are running on and record it in machine_flags. */
static __init void detect_machine_type(void)
{
	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;

	/* STIDP stores the CPU identification into lowcore */
	asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id));

	/* Running under z/VM ? (flag bit 0 - presumably what
	 * MACHINE_IS_VM tests; confirm in asm/setup.h) */
	if (cpuinfo->cpu_id.version == 0xff)
		machine_flags |= 1;

	/* Running on a P/390 ? (flag bit 2 - presumably MACHINE_IS_P390) */
	if (cpuinfo->cpu_id.machine == 0x7490)
		machine_flags |= 4;
}
/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 *
 * Note: this runs before clear_bss_section(), so nothing called
 * before that point may rely on zero-initialized static data.
 */
void __init startup_init(void)
{
	ipl_save_parameters();
	clear_bss_section();
	init_kernel_storage_key();
	lockdep_init();
	detect_machine_type();
	create_kernel_nss();
}
/* Restart/halt/power-off backends: the SMP build uses the smp.c
 * variants, the UP build uses the local ones defined below. */
#ifdef CONFIG_SMP
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
static void do_machine_restart_nonsmp(char * __unused)
{
	do_reipl();
}

static void do_machine_halt_nonsmp(void)
{
	/* issue the vmhalt= CP command (if any) before stopping */
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

static void do_machine_power_off_nonsmp(void)
{
	/* issue the vmpoff= CP command (if any) before stopping */
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif
  382. /*
  383. * Reboot, halt and power_off stubs. They just call _machine_restart,
  384. * _machine_halt or _machine_power_off.
  385. */
  386. void machine_restart(char *command)
  387. {
  388. if (!in_interrupt() || oops_in_progress)
  389. /*
  390. * Only unblank the console if we are called in enabled
  391. * context or a bust_spinlocks cleared the way for us.
  392. */
  393. console_unblank();
  394. _machine_restart(command);
  395. }
  396. void machine_halt(void)
  397. {
  398. if (!in_interrupt() || oops_in_progress)
  399. /*
  400. * Only unblank the console if we are called in enabled
  401. * context or a bust_spinlocks cleared the way for us.
  402. */
  403. console_unblank();
  404. _machine_halt();
  405. }
  406. void machine_power_off(void)
  407. {
  408. if (!in_interrupt() || oops_in_progress)
  409. /*
  410. * Only unblank the console if we are called in enabled
  411. * context or a bust_spinlocks cleared the way for us.
  412. */
  413. console_unblank();
  414. _machine_power_off();
  415. }
/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
/* "mem=<size>" - limit the amount of memory the kernel will use
 * (applied in setup_memory_end()). */
static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	return 0;
}
early_param("mem", early_parse_mem);
  426. /*
  427. * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
  428. */
  429. static int __init early_parse_ipldelay(char *p)
  430. {
  431. unsigned long delay = 0;
  432. delay = simple_strtoul(p, &p, 0);
  433. switch (*p) {
  434. case 's':
  435. case 'S':
  436. delay *= 1000000;
  437. break;
  438. case 'm':
  439. case 'M':
  440. delay *= 60 * 1000000;
  441. }
  442. /* now wait for the requested amount of time */
  443. udelay(delay);
  444. return 0;
  445. }
  446. early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;
EXPORT_SYMBOL_GPL(switch_amode);

/*
 * Rebuild the PSW bit templates for split kernel/user address spaces:
 * user space runs with the given addressing mode, the kernel moves to
 * the home space, and the uaccess method is replaced by one that can
 * cross address spaces (mvcos if the machine has it, a page-table
 * walk otherwise).
 */
static inline void set_amode_and_uaccess(unsigned long user_amode,
					 unsigned long user32_amode)
{
	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
			  PSW32_MASK_PSTATE;
#endif
	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

	if (MACHINE_HAS_MVCOS) {
		printk("mvcos available.\n");
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
	} else {
		printk("mvcos not available.\n");
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	switch_amode = 1;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);

#else /* CONFIG_S390_SWITCH_AMODE */

/* no-op when address space switching is not configured */
static inline void set_amode_and_uaccess(unsigned long user_amode,
					 unsigned long user32_amode)
{
}

#endif /* CONFIG_S390_SWITCH_AMODE */
#ifdef CONFIG_S390_EXEC_PROTECT
unsigned int s390_noexec = 0;
EXPORT_SYMBOL_GPL(s390_noexec);

/*
 * Enable execute protection?  "noexec" enables it (and implies
 * switched address spaces); "noexec=off" leaves it disabled.
 */
static int __init early_parse_noexec(char *p)
{
	if (!strncmp(p, "off", 3))
		return 0;
	switch_amode = 1;
	s390_noexec = 1;
	return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */
/*
 * Apply the addressing mode chosen via noexec/switch_amode: execute
 * protection puts user space into the secondary space, plain address
 * space switching keeps it in the primary space (the kernel moves to
 * the home space either way, see set_amode_and_uaccess()).
 */
static void setup_addressing_mode(void)
{
	if (s390_noexec) {
		printk("S390 execute protection active, ");
		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
		return;
	}
	if (switch_amode) {
		printk("S390 address spaces switched, ");
		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
	}
}
/*
 * Allocate and initialize the prefix page (lowcore) for the boot cpu:
 * new PSWs for every interruption class, the kernel/async/panic stacks
 * and the pointer to the initial task, then make it the prefix page.
 */
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;	/* 64 bit needs two pages */
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (switch_amode)
		/* with switched address spaces the kernel runs in the
		 * home space */
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	/* system calls run with I/O and external interrupts enabled */
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	/* machine checks run with DAT off and machine checks masked */
	lc->mcck_new_psw.mask =
		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	/* stacks grow down, so store the address one past the top */
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#endif
	set_prefix((u32)(unsigned long) lc);
}
  567. static void __init
  568. setup_resources(void)
  569. {
  570. struct resource *res, *sub_res;
  571. int i;
  572. code_resource.start = (unsigned long) &_text;
  573. code_resource.end = (unsigned long) &_etext - 1;
  574. data_resource.start = (unsigned long) &_etext;
  575. data_resource.end = (unsigned long) &_edata - 1;
  576. for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
  577. res = alloc_bootmem_low(sizeof(struct resource));
  578. res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
  579. switch (memory_chunk[i].type) {
  580. case CHUNK_READ_WRITE:
  581. res->name = "System RAM";
  582. break;
  583. case CHUNK_READ_ONLY:
  584. res->name = "System ROM";
  585. res->flags |= IORESOURCE_READONLY;
  586. break;
  587. default:
  588. res->name = "reserved";
  589. }
  590. res->start = memory_chunk[i].addr;
  591. res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
  592. request_resource(&iomem_resource, res);
  593. if (code_resource.start >= res->start &&
  594. code_resource.start <= res->end &&
  595. code_resource.end > res->end) {
  596. sub_res = alloc_bootmem_low(sizeof(struct resource));
  597. memcpy(sub_res, &code_resource,
  598. sizeof(struct resource));
  599. sub_res->end = res->end;
  600. code_resource.start = res->end + 1;
  601. request_resource(res, sub_res);
  602. }
  603. if (code_resource.start >= res->start &&
  604. code_resource.start <= res->end &&
  605. code_resource.end <= res->end)
  606. request_resource(res, &code_resource);
  607. if (data_resource.start >= res->start &&
  608. data_resource.start <= res->end &&
  609. data_resource.end > res->end) {
  610. sub_res = alloc_bootmem_low(sizeof(struct resource));
  611. memcpy(sub_res, &data_resource,
  612. sizeof(struct resource));
  613. sub_res->end = res->end;
  614. data_resource.start = res->end + 1;
  615. request_resource(res, sub_res);
  616. }
  617. if (data_resource.start >= res->start &&
  618. data_resource.start <= res->end &&
  619. data_resource.end <= res->end)
  620. request_resource(res, &data_resource);
  621. }
  622. }
/*
 * Determine memory_end and clip the memory chunk list accordingly.
 * The limit is the smaller of the mem= parameter (if given) and the
 * maximum that still leaves room for the vmalloc area.
 */
static void __init setup_memory_end(void)
{
	unsigned long real_size, memory_size;
	unsigned long max_mem, max_phys;
	int i;

	memory_size = real_size = 0;
	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
	memory_end &= PAGE_MASK;

	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		real_size = max(real_size, chunk->addr + chunk->size);
		if (chunk->addr >= max_mem) {
			/* chunk lies entirely above the limit: drop it */
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		if (chunk->addr + chunk->size > max_mem)
			/* chunk straddles the limit: clip it */
			chunk->size = max_mem - chunk->addr;
		memory_size = max(memory_size, chunk->addr + chunk->size);
	}
	if (!memory_end)
		memory_end = memory_size;
}
/*
 * Set up the bootmem allocator: move the initrd out of the way of the
 * bootmem bitmap if necessary, register all read/write memory chunks,
 * set storage keys for usable pages, and reserve the kernel image,
 * the bitmap itself and the initrd.
 */
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocater
	 * would overwrite it.
	 */
	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);
		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
			if (start + INITRD_SIZE > memory_end) {
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%08lx)\n"
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				printk("Moving initrd (0x%08lx -> 0x%08lx, "
				       "size: %ld)\n",
				       INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		/* pages below start_pfn already got their key above */
		pfn = max(start_chunk, start_pfn);
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);
	/* reserve everything up to and including the kernel image */
	reserve_bootmem(0, PFN_PHYS(start_pfn));

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);

	*cmdline_p = COMMAND_LINE;
	*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* default uaccess method; may be replaced again by
	 * setup_addressing_mode() below */
	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	setup_memory_end();
	setup_addressing_mode();
	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
	smp_setup_cpu_possible_map();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
}
/* Print version/identification/machine type of one cpu to the log.
 * The format string is assembled across #ifdefs so the phys_idx field
 * only appears on SMP builds. */
void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk("cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_S390 *cpuinfo;
	unsigned long n = (unsigned long) v - 1;	/* cookie is cpu + 1 */

	/* keep smp_processor_id() and the lowcore pointer stable */
	preempt_disable();
	if (!n) {
		/* global header, emitted before the first cpu */
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
	}
	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}
  835. static void *c_start(struct seq_file *m, loff_t *pos)
  836. {
  837. return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
  838. }
  839. static void *c_next(struct seq_file *m, void *v, loff_t *pos)
  840. {
  841. ++*pos;
  842. return c_start(m, pos);
  843. }
static void c_stop(struct seq_file *m, void *v)
{
	/* nothing to release */
}

/* seq_file operations backing /proc/cpuinfo */
struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};