/*
 *  arch/s390/kernel/setup.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/compat.h>

long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
			PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
		      PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
		      PSW_MASK_PSTATE | PSW_DEFAULT_KEY);

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL_GPL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
unsigned int console_devno = -1;
unsigned int console_irq = -1;
unsigned long machine_flags = 0;

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;
  74. /*
  75. * This is set up by the setup-routine at boot-time
  76. * for S390 need to find out, what we have to setup
  77. * using address 0x10400 ...
  78. */
#include <asm/setup.h>

static struct resource code_resource = {
	.name  = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name  = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init(void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}

/*
 * VM halt and poweroff setup routines
 */
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";
static char vmpanic_cmd[128] = "";
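
/*
 * Note (added for clarity): strncpy_skip_quote() copies at most n bytes
 * from src to dst while dropping '"' characters, so that a quoted value
 * such as vmhalt="LOGOFF" is stored as plain LOGOFF. The destination is
 * not NUL-terminated here; the callers below terminate their fixed-size
 * buffers explicitly.
 */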
static void strncpy_skip_quote(char *dst, char *src, int n)
{
	int sx, dx;

	dx = 0;
	for (sx = 0; src[sx] != 0; sx++) {
		if (src[sx] == '"')
			continue;
		dst[dx++] = src[sx];
		if (dx >= n)
			break;
	}
}

static int __init vmhalt_setup(char *str)
{
	strncpy_skip_quote(vmhalt_cmd, str, 127);
	vmhalt_cmd[127] = 0;
	return 1;
}

__setup("vmhalt=", vmhalt_setup);

static int __init vmpoff_setup(char *str)
{
	strncpy_skip_quote(vmpoff_cmd, str, 127);
	vmpoff_cmd[127] = 0;
	return 1;
}

__setup("vmpoff=", vmpoff_setup);

static int vmpanic_notify(struct notifier_block *self, unsigned long event,
			  void *data)
{
	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
		cpcmd(vmpanic_cmd, NULL, 0, NULL);

	return NOTIFY_OK;
}

#define PANIC_PRI_VMPANIC	0

static struct notifier_block vmpanic_nb = {
	.notifier_call = vmpanic_notify,
	.priority = PANIC_PRI_VMPANIC
};

static int __init vmpanic_setup(char *str)
{
	static int register_done __initdata = 0;

	strncpy_skip_quote(vmpanic_cmd, str, 127);
	vmpanic_cmd[127] = 0;
	if (!register_done) {
		register_done = 1;
		atomic_notifier_chain_register(&panic_notifier_list,
					       &vmpanic_nb);
	}
	return 1;
}

__setup("vmpanic=", vmpanic_setup);
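
/*
 * Usage note (illustrative): when running under VM, kernel parameters
 * such as vmhalt="LOGOFF" or vmpoff="LOGOFF" store a CP command that is
 * issued via __cpcmd() from the halt/power-off handlers below; vmpanic=
 * likewise names a CP command run from vmpanic_notify() above on panic.
 * Outside of VM the stored strings are ignored.
 */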
/*
 * condev= and conmode= setup parameters.
 */
static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	return 1;
}

__setup("conmode=", conmode_setup);
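
/*
 * Usage note (illustrative): "condev=0x001f" selects the console device
 * number, "conmode=sclp" (or "hwc", "3215", "3270") selects the console
 * driver. conmode_default() below is the fallback when neither is given:
 * under VM it asks CP with "QUERY CONSOLE" / "QUERY TERM"; the offsets
 * query_buffer + 5 and ptr + 13 skip what is presumed to be the leading
 * "CONS " token and the "SUBCHANNEL = " prefix in CP's response.
 */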
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
		SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
		SET_CONSOLE_3270;
#endif
	} else {
#if defined(CONFIG_SCLP_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_SMP
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
static void do_machine_restart_nonsmp(char * __unused)
{
	do_reipl();
}

static void do_machine_halt_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

static void do_machine_power_off_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */
void machine_restart(char *command)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;

static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	return 0;
}
early_param("mem", early_parse_mem);
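
/*
 * Example (illustrative): booting with "mem=512M" makes memparse()
 * return 0x20000000, and setup_memory_end() below then clips the
 * memory chunk list to that limit.
 */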
/*
 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
 */
static int __init early_parse_ipldelay(char *p)
{
	unsigned long delay = 0;

	delay = simple_strtoul(p, &p, 0);
	switch (*p) {
	case 's':
	case 'S':
		delay *= 1000000;
		break;
	case 'm':
	case 'M':
		delay *= 60 * 1000000;
	}

	/* now wait for the requested amount of time */
	udelay(delay);

	return 0;
}
early_param("ipldelay", early_parse_ipldelay);
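
/*
 * Example (illustrative): "ipldelay=30s" busy-waits for about 30 seconds
 * early during boot; the factor of 1000000 converts to microseconds,
 * the unit udelay() expects. Without a suffix the value is passed to
 * udelay() as microseconds.
 */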
#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;
EXPORT_SYMBOL_GPL(switch_amode);

static void set_amode_and_uaccess(unsigned long user_amode,
				  unsigned long user32_amode)
{
	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
			  PSW32_MASK_PSTATE;
#endif
	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

	if (MACHINE_HAS_MVCOS) {
		printk("mvcos available.\n");
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
	} else {
		printk("mvcos not available.\n");
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	switch_amode = 1;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);

#else /* CONFIG_S390_SWITCH_AMODE */
static inline void set_amode_and_uaccess(unsigned long user_amode,
					 unsigned long user32_amode)
{
}
#endif /* CONFIG_S390_SWITCH_AMODE */

#ifdef CONFIG_S390_EXEC_PROTECT
unsigned int s390_noexec = 0;
EXPORT_SYMBOL_GPL(s390_noexec);

/*
 * Enable execute protection?
 */
static int __init early_parse_noexec(char *p)
{
	if (!strncmp(p, "off", 3))
		return 0;
	switch_amode = 1;
	s390_noexec = 1;
	return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */
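
/*
 * Usage note (illustrative): booting with "switch_amode" separates the
 * kernel and user address spaces as applied by setup_addressing_mode()
 * below; "noexec=on" enables execute protection and implies switch_amode,
 * while "noexec=off" leaves both disabled.
 */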
static void setup_addressing_mode(void)
{
	if (s390_noexec) {
		printk("S390 execute protection active, ");
		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
		return;
	}
	if (switch_amode) {
		printk("S390 address spaces switched, ");
		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
	}
}
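
/*
 * Note (added for clarity): setup_lowcore() below allocates and fills the
 * prefix area for the boot cpu - the interruption new PSWs, the kernel,
 * async and panic stack pointers and the current task pointer - and then
 * installs it with set_prefix(). lc_pages is 2 on 64-bit kernels because
 * the 64-bit lowcore spans two pages, and 1 on 31-bit kernels.
 */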
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (switch_amode)
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask =
		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#endif
	set_prefix((u32)(unsigned long) lc);
}
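
/*
 * Note (added for clarity): setup_resources() below registers every memory
 * chunk with the iomem resource tree and nests the kernel code/data
 * resources inside the chunk(s) that contain them, splitting them when
 * they cross a chunk boundary. The result shows up in /proc/iomem roughly
 * as (addresses illustrative):
 *
 *   00000000-1fffffff : System RAM
 *     00010000-003a52e7 : Kernel code
 *     003a52e8-00452737 : Kernel data
 */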
static void __init
setup_resources(void)
{
	struct resource *res, *sub_res;
	int i;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		res = alloc_bootmem_low(sizeof(struct resource));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &code_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			code_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}

		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end <= res->end)
			request_resource(res, &code_resource);

		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &data_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			data_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}

		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end <= res->end)
			request_resource(res, &data_resource);
	}
}

static void __init setup_memory_end(void)
{
	unsigned long real_size, memory_size;
	unsigned long max_mem, max_phys;
	int i;

	memory_size = real_size = 0;
	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
	memory_end &= PAGE_MASK;

	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		real_size = max(real_size, chunk->addr + chunk->size);
		if (chunk->addr >= max_mem) {
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		if (chunk->addr + chunk->size > max_mem)
			chunk->size = max_mem - chunk->addr;
		memory_size = max(memory_size, chunk->addr + chunk->size);
	}
	if (!memory_end)
		memory_end = memory_size;
}
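
/*
 * Note (added for clarity): setup_memory() below initializes the bootmem
 * allocator, registers all CHUNK_READ_WRITE memory with it, reserves the
 * lowcore, kernel image, bootmem bitmap and (if present) the initrd, and
 * sets the default storage key on every page it hands over.
 */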
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocator
	 * would overwrite it.
	 */
	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);
		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

			if (start + INITRD_SIZE > memory_end) {
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%08lx)\n"
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				printk("Moving initrd (0x%08lx -> 0x%08lx, "
				       "size: %ld)\n",
				       INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		pfn = max(start_chunk, start_pfn);
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long) _ehead);
	reserve_bootmem((unsigned long) _stext,
			PFN_PHYS(start_pfn) - (unsigned long) _stext);

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	*cmdline_p = COMMAND_LINE;
	*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	setup_memory_end();
	setup_addressing_mode();
	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
	smp_setup_cpu_possible_map();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
}

void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk("cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
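
/*
 * Example output (illustrative) from print_cpu_info() above on an SMP
 * kernel:
 *
 *   cpu 0 phys_idx=0 vers=FF ident=012345 machine=2084 unused=0000
 *
 * vers/ident/machine come from the CPU id stored by get_cpu_id() in
 * cpu_init().
 */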
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_S390 *cpuinfo;
	unsigned long n = (unsigned long) v - 1;

	s390_adjust_jiffies();
	preempt_disable();
	if (!n) {
		seq_printf(m, "vendor_id       : IBM/S390\n"
			   "# processors    : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
	}
	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};