setup.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028
  1. /*
  2. * arch/s390/kernel/setup.c
  3. *
  4. * S390 version
  5. * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  6. * Author(s): Hartmut Penner (hp@de.ibm.com),
  7. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  8. *
  9. * Derived from "arch/i386/kernel/setup.c"
  10. * Copyright (C) 1995, Linus Torvalds
  11. */
  12. /*
  13. * This file handles the architecture-dependent parts of initialization
  14. */
  15. #include <linux/errno.h>
  16. #include <linux/module.h>
  17. #include <linux/sched.h>
  18. #include <linux/kernel.h>
  19. #include <linux/mm.h>
  20. #include <linux/stddef.h>
  21. #include <linux/unistd.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/slab.h>
  24. #include <linux/user.h>
  25. #include <linux/a.out.h>
  26. #include <linux/tty.h>
  27. #include <linux/ioport.h>
  28. #include <linux/delay.h>
  29. #include <linux/init.h>
  30. #include <linux/initrd.h>
  31. #include <linux/bootmem.h>
  32. #include <linux/root_dev.h>
  33. #include <linux/console.h>
  34. #include <linux/seq_file.h>
  35. #include <linux/kernel_stat.h>
  36. #include <linux/device.h>
  37. #include <linux/notifier.h>
  38. #include <linux/pfn.h>
  39. #include <linux/ctype.h>
  40. #include <linux/reboot.h>
  41. #include <asm/ipl.h>
  42. #include <asm/uaccess.h>
  43. #include <asm/system.h>
  44. #include <asm/smp.h>
  45. #include <asm/mmu_context.h>
  46. #include <asm/cpcmd.h>
  47. #include <asm/lowcore.h>
  48. #include <asm/irq.h>
  49. #include <asm/page.h>
  50. #include <asm/ptrace.h>
  51. #include <asm/sections.h>
  52. #include <asm/ebcdic.h>
  53. #include <asm/compat.h>
/*
 * Default PSW mask templates.  The kernel runs with DAT on in the
 * primary address space with machine checks enabled; user mode
 * additionally enables I/O and external interrupts and the problem
 * state bit.  Both may be rewritten by set_amode_and_uaccess() when
 * address space switching is enabled.
 */
long psw_kernel_bits	= (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
			   PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
long psw_user_bits	= (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			   PSW_MASK_PSTATE | PSW_DEFAULT_KEY);

/*
 * User copy operations.  Filled in at boot with the mvcos based,
 * standard or page table walking implementation (see setup_arch()
 * and set_amode_and_uaccess()).
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);
/*
 * Machine setup..
 */
unsigned int console_mode = 0;		/* console type chosen by conmode_setup()/conmode_default() */
unsigned int console_devno = -1;	/* console device number ("condev=" or QUERY CONSOLE) */
unsigned int console_irq = -1;		/* console subchannel (from QUERY CONSOLE) */
unsigned long machine_flags = 0;	/* MACHINE_* feature flags, set by early startup */
unsigned long elf_hwcap = 0;		/* ELF_HWCAP bits, filled in by setup_hwcaps() */
char elf_platform[ELF_PLATFORM_SIZE];	/* ELF_PLATFORM string, filled in by setup_hwcaps() */

/* Physical memory layout as detected at boot; trimmed by setup_memory_end(). */
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;	/* highest usable address ("mem=" limit) */
  76. /*
  77. * This is set up by the setup-routine at boot-time
  78. * for S390 need to find out, what we have to setup
  79. * using address 0x10400 ...
  80. */
  81. #include <asm/setup.h>
/*
 * Resource entries for the kernel text and data segments; the
 * start/end addresses are filled in by setup_resources().
 */
static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	/*
	 * Adopt the init address space; a cpu arriving here must not
	 * own a user mm yet.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}
/*
 * VM halt and poweroff setup routines
 */
char vmhalt_cmd[128] = "";		/* CP command issued at halt ("vmhalt=") */
char vmpoff_cmd[128] = "";		/* CP command issued at power off ("vmpoff=") */
static char vmpanic_cmd[128] = "";	/* CP command issued on panic ("vmpanic=") */
  118. static void strncpy_skip_quote(char *dst, char *src, int n)
  119. {
  120. int sx, dx;
  121. dx = 0;
  122. for (sx = 0; src[sx] != 0; sx++) {
  123. if (src[sx] == '"') continue;
  124. dst[dx++] = src[sx];
  125. if (dx >= n) break;
  126. }
  127. }
  128. static int __init vmhalt_setup(char *str)
  129. {
  130. strncpy_skip_quote(vmhalt_cmd, str, 127);
  131. vmhalt_cmd[127] = 0;
  132. return 1;
  133. }
  134. __setup("vmhalt=", vmhalt_setup);
  135. static int __init vmpoff_setup(char *str)
  136. {
  137. strncpy_skip_quote(vmpoff_cmd, str, 127);
  138. vmpoff_cmd[127] = 0;
  139. return 1;
  140. }
  141. __setup("vmpoff=", vmpoff_setup);
/*
 * Panic notifier: when running under VM and a "vmpanic=" command was
 * given, hand that command to CP on panic.
 */
static int vmpanic_notify(struct notifier_block *self, unsigned long event,
			  void *data)
{
	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
		cpcmd(vmpanic_cmd, NULL, 0, NULL);
	return NOTIFY_OK;
}

#define PANIC_PRI_VMPANIC	0

static struct notifier_block vmpanic_nb = {
	.notifier_call = vmpanic_notify,
	.priority = PANIC_PRI_VMPANIC
};
/* "vmpanic=": store the CP panic command and hook the panic notifier. */
static int __init vmpanic_setup(char *str)
{
	/*
	 * The parameter may appear multiple times on the command line;
	 * register the notifier only on the first occurrence.
	 */
	static int register_done __initdata = 0;

	strncpy_skip_quote(vmpanic_cmd, str, 127);
	vmpanic_cmd[127] = 0;
	if (!register_done) {
		register_done = 1;
		atomic_notifier_chain_register(&panic_notifier_list,
					       &vmpanic_nb);
	}
	return 1;
}
__setup("vmpanic=", vmpanic_setup);
  167. /*
  168. * condev= and conmode= setup parameter.
  169. */
  170. static int __init condev_setup(char *str)
  171. {
  172. int vdev;
  173. vdev = simple_strtoul(str, &str, 0);
  174. if (vdev >= 0 && vdev < 65536) {
  175. console_devno = vdev;
  176. console_irq = -1;
  177. }
  178. return 1;
  179. }
  180. __setup("condev=", condev_setup);
/*
 * "conmode=": force the console type.  Note the strncmp lengths
 * include the terminating NUL, so the argument must match exactly.
 */
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	return 1;
}
__setup("conmode=", conmode_setup);
/*
 * Pick a default console when no conmode= parameter was given.  Under
 * VM the console device and terminal mode are queried from CP; on a
 * P/390 the 3215/3270 drivers are preferred, otherwise SCLP is used.
 */
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		/*
		 * NOTE(review): ptr is dereferenced without a NULL check;
		 * this relies on CP always printing "SUBCHANNEL =" in the
		 * QUERY CONSOLE response - confirm for all CP levels.
		 */
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
			/* Prefer the mode CP reports, fall back in Kconfig order. */
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
		SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
		SET_CONSOLE_3270;
#endif
	} else {
#if defined(CONFIG_SCLP_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
/*
 * For an FCP dump IPL append a cio_ignore= parameter to the command
 * line so that only the dump device (and the console, if known) are
 * sensed, and lower the console loglevel.
 */
static void __init setup_zfcpdump(unsigned int console_devno)
{
	static char str[64];

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (console_devno != -1)
		sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno, console_devno);
	else
		sprintf(str, "cio_ignore=all,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno);
	strcat(COMMAND_LINE, " ");
	strcat(COMMAND_LINE, str);
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */
#ifdef CONFIG_SMP
/* On SMP the smp code provides the restart/halt/power off backends. */
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
static void do_machine_restart_nonsmp(char * __unused)
{
	do_reipl();
}

static void do_machine_halt_nonsmp(void)
{
	/* Hand a configured "vmhalt=" command to CP, then stop the cpu. */
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

static void do_machine_power_off_nonsmp(void)
{
	/* Hand a configured "vmpoff=" command to CP, then stop the cpu. */
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif
  299. /*
  300. * Reboot, halt and power_off stubs. They just call _machine_restart,
  301. * _machine_halt or _machine_power_off.
  302. */
  303. void machine_restart(char *command)
  304. {
  305. if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
  306. /*
  307. * Only unblank the console if we are called in enabled
  308. * context or a bust_spinlocks cleared the way for us.
  309. */
  310. console_unblank();
  311. _machine_restart(command);
  312. }
  313. void machine_halt(void)
  314. {
  315. if (!in_interrupt() || oops_in_progress)
  316. /*
  317. * Only unblank the console if we are called in enabled
  318. * context or a bust_spinlocks cleared the way for us.
  319. */
  320. console_unblank();
  321. _machine_halt();
  322. }
  323. void machine_power_off(void)
  324. {
  325. if (!in_interrupt() || oops_in_progress)
  326. /*
  327. * Only unblank the console if we are called in enabled
  328. * context or a bust_spinlocks cleared the way for us.
  329. */
  330. console_unblank();
  331. _machine_power_off();
  332. }
/*
 * Dummy power off function.  Routes generic power off requests to
 * machine_power_off().
 */
void (*pm_power_off)(void) = machine_power_off;
/* "mem=": limit the amount of usable memory to the given size. */
static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	return 0;
}
early_param("mem", early_parse_mem);
/*
 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
 */
static int __init early_parse_ipldelay(char *p)
{
	unsigned long delay = 0;

	delay = simple_strtoul(p, &p, 0);
	switch (*p) {
	case 's':
	case 'S':
		delay *= 1000000;	/* seconds -> microseconds */
		break;
	case 'm':
	case 'M':
		delay *= 60 * 1000000;	/* minutes -> microseconds */
	}
	/* now wait for the requested amount of time */
	udelay(delay);
	return 0;
}
early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;	/* non-zero: user and kernel use different address spaces */
EXPORT_SYMBOL_GPL(switch_amode);

/*
 * Rebuild the PSW templates for the given user address space modes
 * and select a uaccess implementation that copes with switched
 * address spaces (mvcos based if available, page table walking
 * otherwise).
 */
static void set_amode_and_uaccess(unsigned long user_amode,
				  unsigned long user32_amode)
{
	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
			  PSW32_MASK_PSTATE;
#endif
	/* The kernel now runs in the home address space. */
	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

	if (MACHINE_HAS_MVCOS) {
		printk("mvcos available.\n");
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
	} else {
		printk("mvcos not available.\n");
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	switch_amode = 1;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);
#else /* CONFIG_S390_SWITCH_AMODE */
static inline void set_amode_and_uaccess(unsigned long user_amode,
					 unsigned long user32_amode)
{
}
#endif /* CONFIG_S390_SWITCH_AMODE */
#ifdef CONFIG_S390_EXEC_PROTECT
unsigned int s390_noexec = 0;	/* non-zero: execute protection is enabled */
EXPORT_SYMBOL_GPL(s390_noexec);

/*
 * Enable execute protection?  "noexec" implies switched address
 * spaces; "noexec=off" leaves both disabled.
 */
static int __init early_parse_noexec(char *p)
{
	if (!strncmp(p, "off", 3))
		return 0;
	switch_amode = 1;
	s390_noexec = 1;
	return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */
/*
 * Apply the addressing mode selected by the early parameters.
 * noexec (secondary space user mode) takes precedence over plain
 * switch_amode (primary space user mode).
 */
static void setup_addressing_mode(void)
{
	if (s390_noexec) {
		printk("S390 execute protection active, ");
		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
	} else if (switch_amode) {
		printk("S390 address spaces switched, ");
		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
	}
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Keep the irq tracing restore PSWs in sync with psw_kernel_bits. */
	sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
	io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
#endif
}
/*
 * Allocate and initialize the lowcore (prefix area) for the boot cpu
 * and install it with set_prefix().
 */
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;	/* 64 bit lowcore needs two pages */
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	/* New PSWs for restart, external, svc, program check,
	 * machine check and I/O interruptions. */
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (switch_amode)
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	/* Machine checks are handled with DAT off and mchecks disabled. */
	lc->mcck_new_psw.mask =
		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	/* Kernel, async and panic stacks for the boot cpu. */
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#endif
	set_prefix((u32)(unsigned long) lc);
}
/*
 * Register the detected memory chunks and the kernel text/data
 * segments in the iomem resource tree.  If the kernel image crosses
 * a chunk boundary the code/data resource is split so each part
 * nests inside its chunk.  request_resource() failures are ignored
 * here (boot time, resources freshly allocated).
 */
static void __init
setup_resources(void)
{
	struct resource *res, *sub_res;
	int i;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		/* Kernel code starts in this chunk but extends beyond it:
		 * register the part inside the chunk and carry the rest
		 * over to the following chunk. */
		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &code_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			code_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}
		/* Kernel code lies entirely within this chunk. */
		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end <= res->end)
			request_resource(res, &code_resource);
		/* Same two cases for the data segment. */
		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &data_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			data_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}
		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end <= res->end)
			request_resource(res, &data_resource);
	}
}
unsigned long real_memory_size;	/* highest address of detected physical memory */
EXPORT_SYMBOL_GPL(real_memory_size);

/*
 * Clamp memory_end to what is usable (VMALLOC_START, the "mem="
 * limit, or the HSA size for an FCP dump kernel) and align all
 * memory chunks to MAX_ORDER blocks.
 */
static void __init setup_memory_end(void)
{
	unsigned long memory_size;
	unsigned long max_mem;
	int i;

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
	/* An FCP dump kernel may only use the HSA. */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP)
		memory_end = ZFCPDUMP_HSA_SIZE;
#endif
	memory_size = 0;
	memory_end &= PAGE_MASK;

	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
	memory_end = min(max_mem, memory_end);

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		unsigned long start, end;
		struct mem_chunk *chunk;
		unsigned long align;

		chunk = &memory_chunk[i];
		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
		start = (chunk->addr + align - 1) & ~(align - 1);
		end = (chunk->addr + chunk->size) & ~(align - 1);
		if (start >= end)
			/* Chunk too small after alignment - drop it. */
			memset(chunk, 0, sizeof(*chunk));
		else {
			chunk->addr = start;
			chunk->size = end - start;
		}
	}

	/* Trim the chunks to max_mem and record real/usable memory sizes. */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		real_memory_size = max(real_memory_size,
				       chunk->addr + chunk->size);
		if (chunk->addr >= max_mem) {
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		if (chunk->addr + chunk->size > max_mem)
			chunk->size = max_mem - chunk->addr;
		memory_size = max(memory_size, chunk->addr + chunk->size);
	}
	if (!memory_end)
		memory_end = memory_size;
}
/*
 * Set up the bootmem allocator: move the initrd out of the way of
 * the bootmem bitmap if necessary, register the usable RAM areas,
 * and reserve the memory occupied by the lowcore, the kernel image,
 * the bitmap itself and the initrd.
 */
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocater
	 * would overwrite it.
	 */
	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);
		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
			if (start + INITRD_SIZE > memory_end) {
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%08lx)\n"
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				printk("Moving initrd (0x%08lx -> 0x%08lx, "
				       "size: %ld)\n",
				       INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		/* Set the storage key of every usable page. */
		pfn = max(start_chunk, start_pfn);
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
/*
 * Execute the store-facility-list instruction and return the 32
 * facility bits it stored into the lowcore.  The EX_TABLE entry
 * points the program check fixup right after the instruction, so
 * machines without stfl simply fall through.
 */
static __init unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b,0b));
	return S390_lowcore.stfl_fac_list;
}
/*
 * Execute store-facility-list-extended, storing up to 'doublewords'
 * doublewords of facility bits into 'list'.  Returns the number of
 * doublewords the machine actually has.
 */
static __init int stfle(unsigned long long *list, int doublewords)
{
	/* addrtype tells gcc how much memory the asm may write */
	typedef struct { unsigned long long _[doublewords]; } addrtype;
	/* stfle takes (count - 1) in and leaves (count - 1) out in r0 */
	register unsigned long __nr asm("0") = doublewords - 1;

	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
	return __nr + 1;
}
/*
 * Setup hardware capabilities.
 */
static void __init setup_hwcaps(void)
{
	/* stfl bit positions that map to HWCAP bits 0..5 */
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
	unsigned long long facility_list_extended;
	unsigned int facility_list;
	int i;

	facility_list = stfl();
	/*
	 * The store facility list bits numbers as found in the principles
	 * of operation are numbered with bit 1UL<<31 as number 0 to
	 * bit 1UL<<0 as number 31.
	 * Bit 0: instructions named N3, "backported" to esa-mode
	 * Bit 2: z/Architecture mode is active
	 * Bit 7: the store-facility-list-extended facility is installed
	 * Bit 17: the message-security assist is installed
	 * Bit 19: the long-displacement facility is installed
	 * Bit 21: the extended-immediate facility is installed
	 * These get translated to:
	 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 * HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5.
	 */
	for (i = 0; i < 6; i++)
		if (facility_list & (1UL << (31 - stfl_bits[i])))
			elf_hwcap |= 1UL << i;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilites
	 * are:
	 * Bit 43: decimal floating point facility is installed
	 * translated to:
	 * HWCAP_S390_DFP bit 6.
	 */
	if ((elf_hwcap & (1UL << 2)) &&
	    stfle(&facility_list_extended, 1) > 0) {
		if (facility_list_extended & (1ULL << (64 - 43)))
			elf_hwcap |= 1UL << 6;
	}

	/* Map the machine type to an ELF platform string. */
	switch (cpuinfo->cpu_id.machine) {
	case 0x9672:
#if !defined(CONFIG_64BIT)
	default: /* Use "g5" as default for 31 bit kernels. */
#endif
		strcpy(elf_platform, "g5");
		break;
	case 0x2064:
	case 0x2066:
#if defined(CONFIG_64BIT)
	default: /* Use "z900" as default for 64 bit kernels. */
#endif
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
		strcpy(elf_platform, "z9-109");
		break;
	}
}
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);

	*cmdline_p = COMMAND_LINE;
	*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* Select the user copy implementation: mvcos based if the
	 * facility is present, standard otherwise.  May be replaced
	 * again by setup_addressing_mode() below. */
	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	/* Order matters: memory limits before addressing mode before
	 * bootmem setup, resources and the boot cpu lowcore. */
	setup_ipl_info();
	setup_memory_end();
	setup_addressing_mode();
	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
	smp_setup_cpu_possible_map();

	/*
	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
	 */
	setup_hwcaps();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();

	/* Setup zfcpdump support */
	setup_zfcpdump(console_devno);
}
/* Log version/identification/machine type of one cpu. */
void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	/* Format string and argument list carry matching #ifdef pairs. */
	printk(KERN_INFO "cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	static const char *hwcap_str[7] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp"
	};
	struct cpuinfo_S390 *cpuinfo;
	unsigned long n = (unsigned long) v - 1;	/* v encodes cpu number + 1 */
	int i;

	s390_adjust_jiffies();
	preempt_disable();
	if (!n) {
		/* Header block, printed before the first cpu only. */
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
		seq_puts(m, "features\t: ");
		for (i = 0; i < 7; i++)
			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
				seq_printf(m, "%s ", hwcap_str[i]);
		seq_puts(m, "\n");
	}

	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		/* Each cpu's data lives in its own lowcore. */
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}
  889. static void *c_start(struct seq_file *m, loff_t *pos)
  890. {
  891. return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
  892. }
  893. static void *c_next(struct seq_file *m, void *v, loff_t *pos)
  894. {
  895. ++*pos;
  896. return c_start(m, pos);
  897. }
/* seq_file iterator stop - nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}

/* /proc/cpuinfo operations. */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};