
/*
 *  arch/s390/kernel/setup.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/compat.h>
#include <asm/kvm_virtio.h>
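
/*
 * Default PSW masks for kernel and user space. setup_addressing_mode()
 * below may replace them when the kernel/user address spaces are
 * switched (switch_amode/noexec).
 */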
long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
                        PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
                      PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                      PSW_MASK_PSTATE | PSW_DEFAULT_KEY);

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */

int __initdata memory_end_set;
unsigned long __initdata memory_end;

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup routine at boot time. On S390 we need to
 * find out what we have to set up, using address 0x10400 ...
 */
#include <asm/setup.h>

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
        /*
         * Store processor id in lowcore (used e.g. in timer_interrupt)
         */
        get_cpu_id(&S390_lowcore.cpu_id);

        /*
         * Force FPU initialization:
         */
        clear_thread_flag(TIF_USEDFPU);
        clear_used_math();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
}

/*
 * condev= and conmode= setup parameter.
 */
static int __init condev_setup(char *str)
{
        int vdev;

        vdev = simple_strtoul(str, &str, 0);
        if (vdev >= 0 && vdev < 65536) {
                console_devno = vdev;
                console_irq = -1;
        }
        return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
        if (MACHINE_IS_KVM) {
                add_preferred_console("hvc", 0, NULL);
                s390_virtio_console_init();
                return;
        }
        if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
                add_preferred_console("ttyS", 0, NULL);
        if (CONSOLE_IS_3270)
                add_preferred_console("tty3270", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
        if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
                SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
        if (strncmp(str, "3215", 5) == 0)
                SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
        if (strncmp(str, "3270", 5) == 0)
                SET_CONSOLE_3270;
#endif
        set_preferred_console();
        return 1;
}

__setup("conmode=", conmode_setup);

static void __init conmode_default(void)
{
        char query_buffer[1024];
        char *ptr;

        if (MACHINE_IS_VM) {
                cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
                console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
                ptr = strstr(query_buffer, "SUBCHANNEL =");
                console_irq = simple_strtoul(ptr + 13, NULL, 16);
                cpcmd("QUERY TERM", query_buffer, 1024, NULL);
                ptr = strstr(query_buffer, "CONMODE");
                /*
                 * Set the conmode to 3215 so that the device recognition
                 * will set the cu_type of the console to 3215. If the
                 * conmode is 3270 and we don't set it back then both
                 * 3215 and the 3270 driver will try to access the console
                 * device (3215 as console and 3270 as normal tty).
                 */
                cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
                if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                        return;
                }
                if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                } else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                }
        } else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                SET_CONSOLE_SCLP;
#endif
        }
}
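
/*
 * When the kernel is booted to take a zfcp dump, restrict channel I/O
 * to the dump device (and the console, if one was specified) by
 * appending a cio_ignore= parameter to the boot command line, and
 * lower the console loglevel.
 */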
#ifdef CONFIG_ZFCPDUMP
static void __init setup_zfcpdump(unsigned int console_devno)
{
        static char str[41];

        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return;
        if (console_devno != -1)
                sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno, console_devno);
        else
                sprintf(str, " cio_ignore=all,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno);
        strcat(boot_command_line, str);
        console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */
void machine_restart(char *command)
{
        if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_restart(command);
}

void machine_halt(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_halt();
}

void machine_power_off(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;

static int __init early_parse_mem(char *p)
{
        memory_end = memparse(p, &p);
        memory_end_set = 1;
        return 0;
}
early_param("mem", early_parse_mem);

#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;
EXPORT_SYMBOL_GPL(switch_amode);

static int set_amode_and_uaccess(unsigned long user_amode,
                                 unsigned long user32_amode)
{
        psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
                        PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                        PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
        psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
                          PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                          PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
        psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
                          PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
                          PSW32_MASK_PSTATE;
#endif
        psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
                          PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

        if (MACHINE_HAS_MVCOS) {
                memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
                return 1;
        } else {
                memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
                return 0;
        }
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
        switch_amode = 1;
        return 0;
}
early_param("switch_amode", early_parse_switch_amode);

#else /* CONFIG_S390_SWITCH_AMODE */
static inline int set_amode_and_uaccess(unsigned long user_amode,
                                        unsigned long user32_amode)
{
        return 0;
}
#endif /* CONFIG_S390_SWITCH_AMODE */

#ifdef CONFIG_S390_EXEC_PROTECT
unsigned int s390_noexec = 0;
EXPORT_SYMBOL_GPL(s390_noexec);

/*
 * Enable execute protection?
 */
static int __init early_parse_noexec(char *p)
{
        if (!strncmp(p, "off", 3))
                return 0;
        switch_amode = 1;
        s390_noexec = 1;
        return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */

static void setup_addressing_mode(void)
{
        if (s390_noexec) {
                if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
                                          PSW32_ASC_SECONDARY))
                        pr_info("Execute protection active, "
                                "mvcos available\n");
                else
                        pr_info("Execute protection active, "
                                "mvcos not available\n");
        } else if (switch_amode) {
                if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
                        pr_info("Address spaces switched, "
                                "mvcos available\n");
                else
                        pr_info("Address spaces switched, "
                                "mvcos not available\n");
        }
#ifdef CONFIG_TRACE_IRQFLAGS
        sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
        io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
#endif
}

static void __init
setup_lowcore(void)
{
        struct _lowcore *lc;
        int lc_pages;

        /*
         * Setup lowcore for boot cpu
         */
        lc_pages = sizeof(void *) == 8 ? 2 : 1;
        lc = (struct _lowcore *)
                __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
        memset(lc, 0, lc_pages * PAGE_SIZE);
        lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
        if (switch_amode)
                lc->restart_psw.mask |= PSW_ASC_HOME;
        lc->external_new_psw.mask = psw_kernel_bits;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
        lc->program_new_psw.mask = psw_kernel_bits;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
        lc->mcck_new_psw.mask =
                psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = psw_kernel_bits;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
        lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
        lc->async_stack = (unsigned long)
                __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
        lc->panic_stack = (unsigned long)
                __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
        lc->current_task = (unsigned long) init_thread_union.thread_info.task;
        lc->thread_info = (unsigned long) &init_thread_union;
        lc->machine_flags = S390_lowcore.machine_flags;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                lc->extended_save_area_addr = (__u32)
                        __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
                /* enable extended save area */
                __ctl_set_bit(14, 29);
        }
#else
        lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
        lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
        lc->async_enter_timer = S390_lowcore.async_enter_timer;
        lc->exit_timer = S390_lowcore.exit_timer;
        lc->user_timer = S390_lowcore.user_timer;
        lc->system_timer = S390_lowcore.system_timer;
        lc->steal_timer = S390_lowcore.steal_timer;
        lc->last_update_timer = S390_lowcore.last_update_timer;
        lc->last_update_clock = S390_lowcore.last_update_clock;
        lc->ftrace_func = S390_lowcore.ftrace_func;
        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
}
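
/*
 * setup_resources() adds the memory chunks and the kernel code and data
 * segments to the resource tree (/proc/iomem), splitting the kernel
 * resources where they span more than one memory chunk.
 */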
static void __init
setup_resources(void)
{
        struct resource *res, *sub_res;
        int i;

        code_resource.start = (unsigned long) &_text;
        code_resource.end = (unsigned long) &_etext - 1;
        data_resource.start = (unsigned long) &_etext;
        data_resource.end = (unsigned long) &_edata - 1;

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (!memory_chunk[i].size)
                        continue;
                res = alloc_bootmem_low(sizeof(struct resource));
                res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
                switch (memory_chunk[i].type) {
                case CHUNK_READ_WRITE:
                        res->name = "System RAM";
                        break;
                case CHUNK_READ_ONLY:
                        res->name = "System ROM";
                        res->flags |= IORESOURCE_READONLY;
                        break;
                default:
                        res->name = "reserved";
                }
                res->start = memory_chunk[i].addr;
                res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
                request_resource(&iomem_resource, res);

                if (code_resource.start >= res->start &&
                    code_resource.start <= res->end &&
                    code_resource.end > res->end) {
                        sub_res = alloc_bootmem_low(sizeof(struct resource));
                        memcpy(sub_res, &code_resource,
                               sizeof(struct resource));
                        sub_res->end = res->end;
                        code_resource.start = res->end + 1;
                        request_resource(res, sub_res);
                }

                if (code_resource.start >= res->start &&
                    code_resource.start <= res->end &&
                    code_resource.end <= res->end)
                        request_resource(res, &code_resource);

                if (data_resource.start >= res->start &&
                    data_resource.start <= res->end &&
                    data_resource.end > res->end) {
                        sub_res = alloc_bootmem_low(sizeof(struct resource));
                        memcpy(sub_res, &data_resource,
                               sizeof(struct resource));
                        sub_res->end = res->end;
                        data_resource.start = res->end + 1;
                        request_resource(res, sub_res);
                }

                if (data_resource.start >= res->start &&
                    data_resource.start <= res->end &&
                    data_resource.end <= res->end)
                        request_resource(res, &data_resource);
        }
}
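
/*
 * real_memory_size records the highest address found in the memory
 * chunk list; setup_memory_end() below clips the chunks to memory_end
 * (at most VMEM_MAX_PHYS) and aligns them to MAX_ORDER boundaries.
 */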
unsigned long real_memory_size;
EXPORT_SYMBOL_GPL(real_memory_size);

static void __init setup_memory_end(void)
{
        unsigned long memory_size;
        unsigned long max_mem;
        int i;

#ifdef CONFIG_ZFCPDUMP
        if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
                memory_end = ZFCPDUMP_HSA_SIZE;
                memory_end_set = 1;
        }
#endif
        memory_size = 0;
        memory_end &= PAGE_MASK;

        max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
        memory_end = min(max_mem, memory_end);

        /*
         * Make sure all chunks are MAX_ORDER aligned so we don't need the
         * extra checks that HOLES_IN_ZONE would require.
         */
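        /*
         * For example, with the common MAX_ORDER of 11 and 4 KB pages the
         * alignment below is 1UL << (11 + 12 - 1) = 4 MB: chunk starts are
         * rounded up and chunk ends rounded down to a 4 MB boundary.
         */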
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                unsigned long start, end;
                struct mem_chunk *chunk;
                unsigned long align;

                chunk = &memory_chunk[i];
                align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
                start = (chunk->addr + align - 1) & ~(align - 1);
                end = (chunk->addr + chunk->size) & ~(align - 1);
                if (start >= end)
                        memset(chunk, 0, sizeof(*chunk));
                else {
                        chunk->addr = start;
                        chunk->size = end - start;
                }
        }

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                struct mem_chunk *chunk = &memory_chunk[i];

                real_memory_size = max(real_memory_size,
                                       chunk->addr + chunk->size);
                if (chunk->addr >= max_mem) {
                        memset(chunk, 0, sizeof(*chunk));
                        continue;
                }
                if (chunk->addr + chunk->size > max_mem)
                        chunk->size = max_mem - chunk->addr;
                memory_size = max(memory_size, chunk->addr + chunk->size);
        }
        if (!memory_end)
                memory_end = memory_size;
}
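
/*
 * setup_memory() initializes the bootmem allocator, registers the
 * usable (CHUNK_READ_WRITE) memory with it, sets the storage keys and
 * reserves the lowcore/command line area, the kernel image, the
 * bootmem bitmap and the initrd.
 */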
static void __init
setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long start_pfn, end_pfn;
        int i;

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(&_end));
        end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Move the initrd in case the bitmap of the bootmem allocator
         * would overwrite it.
         */
        if (INITRD_START && INITRD_SIZE) {
                unsigned long bmap_size;
                unsigned long start;

                bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
                bmap_size = PFN_PHYS(bmap_size);
                if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
                        start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
                        if (start + INITRD_SIZE > memory_end) {
                                pr_err("initrd extends beyond end of "
                                       "memory (0x%08lx > 0x%08lx) "
                                       "disabling initrd\n",
                                       start + INITRD_SIZE, memory_end);
                                INITRD_START = INITRD_SIZE = 0;
                        } else {
                                pr_info("Moving initrd (0x%08lx -> "
                                        "0x%08lx, size: %ld)\n",
                                        INITRD_START, start, INITRD_SIZE);
                                memmove((void *) start, (void *) INITRD_START,
                                        INITRD_SIZE);
                                INITRD_START = start;
                        }
                }
        }
#endif

        /*
         * Initialize the boot-time allocator
         */
        bootmap_size = init_bootmem(start_pfn, end_pfn);

        /*
         * Register RAM areas with the bootmem allocator.
         */
        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                unsigned long start_chunk, end_chunk, pfn;

                if (memory_chunk[i].type != CHUNK_READ_WRITE)
                        continue;
                start_chunk = PFN_DOWN(memory_chunk[i].addr);
                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
                end_chunk = min(end_chunk, end_pfn);
                if (start_chunk >= end_chunk)
                        continue;
                add_active_range(0, start_chunk, end_chunk);
                pfn = max(start_chunk, start_pfn);
                for (; pfn < end_chunk; pfn++)
                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
        }

        psw_set_key(PAGE_DEFAULT_KEY);

        free_bootmem_with_active_regions(0, max_pfn);

        /*
         * Reserve memory used for lowcore/command line/kernel image.
         */
        reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
        reserve_bootmem((unsigned long)_stext,
                        PFN_PHYS(start_pfn) - (unsigned long)_stext,
                        BOOTMEM_DEFAULT);
        /*
         * Reserve the bootmem bitmap itself as well. We do this in two
         * steps (first step was init_bootmem()) because this catches
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
                        BOOTMEM_DEFAULT);

#ifdef CONFIG_BLK_DEV_INITRD
        if (INITRD_START && INITRD_SIZE) {
                if (INITRD_START + INITRD_SIZE <= memory_end) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE,
                                        BOOTMEM_DEFAULT);
                        initrd_start = INITRD_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        pr_err("initrd extends beyond end of "
                               "memory (0x%08lx > 0x%08lx) "
                               "disabling initrd\n",
                               initrd_start + INITRD_SIZE, memory_end);
                        initrd_start = initrd_end = 0;
                }
        }
#endif
}

/*
 * Setup hardware capabilities.
 */
static void __init setup_hwcaps(void)
{
        static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
        unsigned long long facility_list_extended;
        unsigned int facility_list;
        int i;

        facility_list = stfl();
        /*
         * The store-facility-list bits, as found in the Principles of
         * Operation, are numbered with bit 1UL<<31 as number 0 and
         * bit 1UL<<0 as number 31.
         *   Bit 0: instructions named N3, "backported" to esa-mode
         *   Bit 2: z/Architecture mode is active
         *   Bit 7: the store-facility-list-extended facility is installed
         *   Bit 17: the message-security assist is installed
         *   Bit 19: the long-displacement facility is installed
         *   Bit 21: the extended-immediate facility is installed
         *   Bit 22: extended-translation facility 3 is installed
         *   Bit 30: extended-translation facility 3 enhancement facility
         * These get translated to:
         *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
         *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
         *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
         *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
         */
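        /*
         * Example: stfl_bits[2] == 7, the store-facility-list-extended
         * facility, is tested as facility_list & (1UL << (31 - 7)) and,
         * when present, sets elf_hwcap bit 2 (HWCAP_S390_STFLE).
         */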
        for (i = 0; i < 6; i++)
                if (facility_list & (1UL << (31 - stfl_bits[i])))
                        elf_hwcap |= 1UL << i;

        if ((facility_list & (1UL << (31 - 22)))
            && (facility_list & (1UL << (31 - 30))))
                elf_hwcap |= 1UL << 8;

        /*
         * Check for additional facilities with store-facility-list-extended.
         * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
         * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
         * as stored by stfl, bits 32-xxx contain additional facilities.
         * How many facility words are stored depends on the number of
         * doublewords passed to the instruction. The additional facilities
         * are:
         *   Bit 42: decimal floating point facility is installed
         *   Bit 44: perform floating point operation facility is installed
         * translated to:
         *   HWCAP_S390_DFP bit 6 (42 && 44).
         */
        if ((elf_hwcap & (1UL << 2)) &&
            __stfle(&facility_list_extended, 1) > 0) {
                if ((facility_list_extended & (1ULL << (63 - 42)))
                    && (facility_list_extended & (1ULL << (63 - 44))))
                        elf_hwcap |= 1UL << 6;
        }

        if (MACHINE_HAS_HPAGE)
                elf_hwcap |= 1UL << 7;

        switch (S390_lowcore.cpu_id.machine) {
        case 0x9672:
#if !defined(CONFIG_64BIT)
        default:        /* Use "g5" as default for 31 bit kernels. */
#endif
                strcpy(elf_platform, "g5");
                break;
        case 0x2064:
        case 0x2066:
#if defined(CONFIG_64BIT)
        default:        /* Use "z900" as default for 64 bit kernels. */
#endif
                strcpy(elf_platform, "z900");
                break;
        case 0x2084:
        case 0x2086:
                strcpy(elf_platform, "z990");
                break;
        case 0x2094:
        case 0x2096:
                strcpy(elf_platform, "z9-109");
                break;
        case 0x2097:
        case 0x2098:
                strcpy(elf_platform, "z10");
                break;
        }
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init
setup_arch(char **cmdline_p)
{
        /*
         * print what head.S has found out about the machine
         */
#ifndef CONFIG_64BIT
        if (MACHINE_IS_VM)
                pr_info("Linux is running as a z/VM "
                        "guest operating system in 31-bit mode\n");
        else
                pr_info("Linux is running natively in 31-bit mode\n");
        if (MACHINE_HAS_IEEE)
                pr_info("The hardware system has IEEE compatible "
                        "floating point units\n");
        else
                pr_info("The hardware system has no IEEE compatible "
                        "floating point units\n");
#else /* CONFIG_64BIT */
        if (MACHINE_IS_VM)
                pr_info("Linux is running as a z/VM "
                        "guest operating system in 64-bit mode\n");
        else if (MACHINE_IS_KVM)
                pr_info("Linux is running under KVM in 64-bit mode\n");
        else
                pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */

        /* Have one command line that is parsed and saved in /proc/cmdline */
        /* boot_command_line has already been set up in early.c */
        *cmdline_p = boot_command_line;

        ROOT_DEV = Root_RAM0;

        init_mm.start_code = PAGE_OFFSET;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        if (MACHINE_HAS_MVCOS)
                memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
        else
                memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

        parse_early_param();

        setup_ipl();
        setup_memory_end();
        setup_addressing_mode();
        setup_memory();
        setup_resources();
        setup_lowcore();

        cpu_init();
        __cpu_logical_map[0] = stap();
        s390_init_cpu_topology();

        /*
         * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
         */
        setup_hwcaps();

        /*
         * Create kernel page tables and switch to virtual addressing.
         */
        paging_init();

        /* Setup default console */
        conmode_default();
        set_preferred_console();

        /* Setup zfcpdump support */
        setup_zfcpdump(console_devno);
}