setup.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928
  1. /*
  2. * arch/s390/kernel/setup.c
  3. *
  4. * S390 version
  5. * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  6. * Author(s): Hartmut Penner (hp@de.ibm.com),
  7. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  8. *
  9. * Derived from "arch/i386/kernel/setup.c"
  10. * Copyright (C) 1995, Linus Torvalds
  11. */
  12. /*
  13. * This file handles the architecture-dependent parts of initialization
  14. */
  15. #include <linux/errno.h>
  16. #include <linux/module.h>
  17. #include <linux/sched.h>
  18. #include <linux/kernel.h>
  19. #include <linux/mm.h>
  20. #include <linux/stddef.h>
  21. #include <linux/unistd.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/slab.h>
  24. #include <linux/user.h>
  25. #include <linux/tty.h>
  26. #include <linux/ioport.h>
  27. #include <linux/delay.h>
  28. #include <linux/init.h>
  29. #include <linux/initrd.h>
  30. #include <linux/bootmem.h>
  31. #include <linux/root_dev.h>
  32. #include <linux/console.h>
  33. #include <linux/seq_file.h>
  34. #include <linux/kernel_stat.h>
  35. #include <linux/device.h>
  36. #include <linux/notifier.h>
  37. #include <linux/pfn.h>
  38. #include <linux/ctype.h>
  39. #include <linux/reboot.h>
  40. #include <asm/ipl.h>
  41. #include <asm/uaccess.h>
  42. #include <asm/system.h>
  43. #include <asm/smp.h>
  44. #include <asm/mmu_context.h>
  45. #include <asm/cpcmd.h>
  46. #include <asm/lowcore.h>
  47. #include <asm/irq.h>
  48. #include <asm/page.h>
  49. #include <asm/ptrace.h>
  50. #include <asm/sections.h>
  51. #include <asm/ebcdic.h>
  52. #include <asm/compat.h>
/*
 * Default PSW masks for kernel and user mode.  The kernel runs with DAT
 * enabled in the primary address space; user mode additionally enables
 * I/O and external interrupts and sets the problem-state bit.  These may
 * be rewritten later by set_amode_and_uaccess() when the address-space
 * modes are switched.
 */
long psw_kernel_bits	= (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
			   PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
long psw_user_bits	= (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			   PSW_MASK_PSTATE | PSW_DEFAULT_KEY);

/*
 * User copy operations.  Filled in setup_arch() with the mvcos based or
 * standard routines, depending on machine capabilities.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);
/*
 * Machine setup..
 */
unsigned int console_mode = 0;		/* console type selected by conmode= */
unsigned int console_devno = -1;	/* console device number (condev=) */
unsigned int console_irq = -1;		/* console subchannel, -1 = unknown */
unsigned long machine_flags = 0;	/* MACHINE_* feature bits */
unsigned long elf_hwcap = 0;		/* hardware capability bits, see setup_hwcaps() */
char elf_platform[ELF_PLATFORM_SIZE];	/* platform string, see setup_hwcaps() */

struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;	/* "mem=" limit, 0 = no limit */
/*
 * This is set up by the setup-routine at boot-time
 * for S390 need to find out, what we have to setup
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * Resource entries for the kernel text and data segments; the start/end
 * addresses are filled in by setup_resources().
 */
static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	/*
	 * A cpu being initialized must not yet own a user mm; it borrows
	 * the init_mm as its active mm (lazy TLB mode).
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}
  111. /*
  112. * condev= and conmode= setup parameter.
  113. */
  114. static int __init condev_setup(char *str)
  115. {
  116. int vdev;
  117. vdev = simple_strtoul(str, &str, 0);
  118. if (vdev >= 0 && vdev < 65536) {
  119. console_devno = vdev;
  120. console_irq = -1;
  121. }
  122. return 1;
  123. }
  124. __setup("condev=", condev_setup);
/*
 * Select the console type named on the command line, if the matching
 * driver is configured.  The strncmp lengths include the terminating
 * NUL, so these are effectively exact matches.
 */
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	return 1;
}

__setup("conmode=", conmode_setup);
  142. static void __init conmode_default(void)
  143. {
  144. char query_buffer[1024];
  145. char *ptr;
  146. if (MACHINE_IS_VM) {
  147. cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
  148. console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
  149. ptr = strstr(query_buffer, "SUBCHANNEL =");
  150. console_irq = simple_strtoul(ptr + 13, NULL, 16);
  151. cpcmd("QUERY TERM", query_buffer, 1024, NULL);
  152. ptr = strstr(query_buffer, "CONMODE");
  153. /*
  154. * Set the conmode to 3215 so that the device recognition
  155. * will set the cu_type of the console to 3215. If the
  156. * conmode is 3270 and we don't set it back then both
  157. * 3215 and the 3270 driver will try to access the console
  158. * device (3215 as console and 3270 as normal tty).
  159. */
  160. cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
  161. if (ptr == NULL) {
  162. #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
  163. SET_CONSOLE_SCLP;
  164. #endif
  165. return;
  166. }
  167. if (strncmp(ptr + 8, "3270", 4) == 0) {
  168. #if defined(CONFIG_TN3270_CONSOLE)
  169. SET_CONSOLE_3270;
  170. #elif defined(CONFIG_TN3215_CONSOLE)
  171. SET_CONSOLE_3215;
  172. #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
  173. SET_CONSOLE_SCLP;
  174. #endif
  175. } else if (strncmp(ptr + 8, "3215", 4) == 0) {
  176. #if defined(CONFIG_TN3215_CONSOLE)
  177. SET_CONSOLE_3215;
  178. #elif defined(CONFIG_TN3270_CONSOLE)
  179. SET_CONSOLE_3270;
  180. #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
  181. SET_CONSOLE_SCLP;
  182. #endif
  183. }
  184. } else if (MACHINE_IS_P390) {
  185. #if defined(CONFIG_TN3215_CONSOLE)
  186. SET_CONSOLE_3215;
  187. #elif defined(CONFIG_TN3270_CONSOLE)
  188. SET_CONSOLE_3270;
  189. #endif
  190. } else {
  191. #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
  192. SET_CONSOLE_SCLP;
  193. #endif
  194. }
  195. }
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
/*
 * When booted as a zfcp dumper, restrict channel device recognition to
 * the dump device (and the console device, if one was given) by
 * appending a cio_ignore= parameter to the command line, and quiet the
 * console output.
 */
static void __init setup_zfcpdump(unsigned int console_devno)
{
	static char str[64];

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (console_devno != -1)
		sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno, console_devno);
	else
		sprintf(str, "cio_ignore=all,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno);
	strcat(COMMAND_LINE, " ");
	strcat(COMMAND_LINE, str);
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */
  215. /*
  216. * Reboot, halt and power_off stubs. They just call _machine_restart,
  217. * _machine_halt or _machine_power_off.
  218. */
  219. void machine_restart(char *command)
  220. {
  221. if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
  222. /*
  223. * Only unblank the console if we are called in enabled
  224. * context or a bust_spinlocks cleared the way for us.
  225. */
  226. console_unblank();
  227. _machine_restart(command);
  228. }
  229. void machine_halt(void)
  230. {
  231. if (!in_interrupt() || oops_in_progress)
  232. /*
  233. * Only unblank the console if we are called in enabled
  234. * context or a bust_spinlocks cleared the way for us.
  235. */
  236. console_unblank();
  237. _machine_halt();
  238. }
  239. void machine_power_off(void)
  240. {
  241. if (!in_interrupt() || oops_in_progress)
  242. /*
  243. * Only unblank the console if we are called in enabled
  244. * context or a bust_spinlocks cleared the way for us.
  245. */
  246. console_unblank();
  247. _machine_power_off();
  248. }
  249. /*
  250. * Dummy power off function.
  251. */
  252. void (*pm_power_off)(void) = machine_power_off;
/*
 * "mem=SIZE" limits the amount of memory used by the kernel; the value
 * is applied in setup_memory_end().
 */
static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	return 0;
}
early_param("mem", early_parse_mem);
/*
 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
 */
static int __init early_parse_ipldelay(char *p)
{
	unsigned long delay = 0;

	delay = simple_strtoul(p, &p, 0);
	switch (*p) {
	case 's':
	case 'S':
		delay *= 1000000;	/* seconds -> microseconds */
		break;
	case 'm':
	case 'M':
		delay *= 60 * 1000000;	/* minutes -> microseconds */
	}
	/* now wait for the requested amount of time */
	/* NOTE(review): this busy-waits in udelay() even for delays of
	 * several minutes — intentional at early boot, but worth confirming. */
	udelay(delay);
	return 0;
}

early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;	/* non-zero: separate kernel/user address spaces */
EXPORT_SYMBOL_GPL(switch_amode);

/*
 * Rebuild the default PSW masks for the given user address-space
 * controls, move the kernel into the home address space and install
 * the user-copy routines required by the switched mode (mvcos based
 * when available, page-table walking otherwise).
 */
static void set_amode_and_uaccess(unsigned long user_amode,
				  unsigned long user32_amode)
{
	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
			  PSW32_MASK_PSTATE;
#endif
	/* The kernel now runs in the home address space. */
	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

	if (MACHINE_HAS_MVCOS) {
		printk("mvcos available.\n");
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
	} else {
		printk("mvcos not available.\n");
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	switch_amode = 1;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);
#else /* CONFIG_S390_SWITCH_AMODE */
/* No-op stub when address-space switching is not configured. */
static inline void set_amode_and_uaccess(unsigned long user_amode,
					 unsigned long user32_amode)
{
}
#endif /* CONFIG_S390_SWITCH_AMODE */
#ifdef CONFIG_S390_EXEC_PROTECT
unsigned int s390_noexec = 0;	/* non-zero: execute protection enabled */
EXPORT_SYMBOL_GPL(s390_noexec);

/*
 * Enable execute protection?
 * "noexec" (or any value other than "off") enables it; execute
 * protection requires the switched addressing modes as well.
 */
static int __init early_parse_noexec(char *p)
{
	if (!strncmp(p, "off", 3))
		return 0;
	switch_amode = 1;
	s390_noexec = 1;
	return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */
/*
 * Apply the addressing-mode choices made by the early parameters above
 * and keep the irq-trace restore PSWs consistent with psw_kernel_bits.
 */
static void setup_addressing_mode(void)
{
	if (s390_noexec) {
		printk("S390 execute protection active, ");
		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
	} else if (switch_amode) {
		printk("S390 address spaces switched, ");
		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
	}
#ifdef CONFIG_TRACE_IRQFLAGS
	sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
	io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
#endif
}
/*
 * Allocate and initialize the lowcore (prefix page) of the boot cpu:
 * interruption new-PSWs, stacks and the current task pointer.
 */
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;	/* two pages on 64 bit */
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	/* Restart PSW runs with DAT off; home space if amodes are switched. */
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (switch_amode)
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	/* System calls are entered with I/O and external interrupts enabled. */
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	/* Machine checks run with machine checks and DAT disabled. */
	lc->mcck_new_psw.mask =
		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#endif
	/* Make the new lowcore the prefix page of this cpu. */
	set_prefix((u32)(unsigned long) lc);
}
/*
 * Register the detected memory chunks in the iomem resource tree and
 * insert the kernel code/data resources into the matching entries,
 * splitting a resource that spans a chunk boundary.
 */
static void __init
setup_resources(void)
{
	struct resource *res, *sub_res;
	int i;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		/*
		 * Kernel code starts in this chunk but extends past it:
		 * register the part inside the chunk and carry the
		 * remainder over to the next chunk.
		 */
		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &code_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			code_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}

		/* Kernel code fits entirely inside this chunk. */
		if (code_resource.start >= res->start &&
		    code_resource.start <= res->end &&
		    code_resource.end <= res->end)
			request_resource(res, &code_resource);

		/* Same two cases for the kernel data resource. */
		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end > res->end) {
			sub_res = alloc_bootmem_low(sizeof(struct resource));
			memcpy(sub_res, &data_resource,
			       sizeof(struct resource));
			sub_res->end = res->end;
			data_resource.start = res->end + 1;
			request_resource(res, sub_res);
		}

		if (data_resource.start >= res->start &&
		    data_resource.start <= res->end &&
		    data_resource.end <= res->end)
			request_resource(res, &data_resource);
	}
}
unsigned long real_memory_size;	/* end of the highest detected memory chunk */
EXPORT_SYMBOL_GPL(real_memory_size);

/*
 * Determine the usable memory limit: clamp memory_end to the
 * architectural maximum (VMEM_MAX_PHYS), align all memory chunks to
 * MAX_ORDER blocks and trim or drop chunks beyond the limit.
 */
static void __init setup_memory_end(void)
{
	unsigned long memory_size;
	unsigned long max_mem;
	int i;

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
	/* A zfcp dumper only uses the HSA sized part of memory. */
	if (ipl_info.type == IPL_TYPE_FCP_DUMP)
		memory_end = ZFCPDUMP_HSA_SIZE;
#endif
	memory_size = 0;
	memory_end &= PAGE_MASK;

	max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
	memory_end = min(max_mem, memory_end);

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		unsigned long start, end;
		struct mem_chunk *chunk;
		unsigned long align;

		chunk = &memory_chunk[i];
		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
		start = (chunk->addr + align - 1) & ~(align - 1);
		end = (chunk->addr + chunk->size) & ~(align - 1);
		if (start >= end)
			memset(chunk, 0, sizeof(*chunk));  /* chunk vanished */
		else {
			chunk->addr = start;
			chunk->size = end - start;
		}
	}

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		real_memory_size = max(real_memory_size,
				       chunk->addr + chunk->size);
		if (chunk->addr >= max_mem) {
			/* Entirely above the limit: drop it. */
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		if (chunk->addr + chunk->size > max_mem)
			chunk->size = max_mem - chunk->addr;
		memory_size = max(memory_size, chunk->addr + chunk->size);
	}
	if (!memory_end)
		memory_end = memory_size;
}
/*
 * Set up the bootmem allocator, register all usable RAM with it and
 * reserve the ranges already in use: lowcore, kernel image, the bootmem
 * bitmap itself and (optionally) the initrd.
 */
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocater
	 * would overwrite it.
	 */
	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);
		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
			if (start + INITRD_SIZE > memory_end) {
				/* Nowhere to move it - drop the initrd. */
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%08lx)\n"
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				printk("Moving initrd (0x%08lx -> 0x%08lx, "
				       "size: %ld)\n",
				       INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		pfn = max(start_chunk, start_pfn);
		/* Initialize the storage key of every page in the chunk. */
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext,
			BOOTMEM_DEFAULT);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
			BOOTMEM_DEFAULT);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE,
					BOOTMEM_DEFAULT);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
/*
 * Execute the "store facility list" (stfl) instruction and return the
 * facility bits it stored into the lowcore.  On machines without stfl
 * the resulting fault is swallowed by the exception-table entry and the
 * (zeroed) lowcore field is returned unchanged.
 */
static __init unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b,0b));
	return S390_lowcore.stfl_fac_list;
}
/*
 * Execute "store facility list extended" (stfle), storing up to
 * 'doublewords' doublewords of facility bits into *list.  Returns the
 * number of doublewords the machine would provide (may exceed the
 * number requested).  The addrtype typedef gives the asm constraint the
 * correct output size.
 */
static __init int stfle(unsigned long long *list, int doublewords)
{
	typedef struct { unsigned long long _[doublewords]; } addrtype;
	register unsigned long __nr asm("0") = doublewords - 1;

	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
	return __nr + 1;
}
/*
 * Setup hardware capabilities: translate the facility bits reported by
 * stfl/stfle into elf_hwcap bits and derive the elf_platform string
 * from the machine type.
 */
static void __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
	unsigned long long facility_list_extended;
	unsigned int facility_list;
	int i;

	facility_list = stfl();
	/*
	 * The store facility list bits numbers as found in the principles
	 * of operation are numbered with bit 1UL<<31 as number 0 to
	 * bit 1UL<<0 as number 31.
	 *   Bit 0: instructions named N3, "backported" to esa-mode
	 *   Bit 2: z/Architecture mode is active
	 *   Bit 7: the store-facility-list-extended facility is installed
	 *   Bit 17: the message-security assist is installed
	 *   Bit 19: the long-displacement facility is installed
	 *   Bit 21: the extended-immediate facility is installed
	 * These get translated to:
	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 *   HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5.
	 */
	for (i = 0; i < 6; i++)
		if (facility_list & (1UL << (31 - stfl_bits[i])))
			elf_hwcap |= 1UL << i;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilites
	 * are:
	 *   Bit 43: decimal floating point facility is installed
	 * translated to:
	 *   HWCAP_S390_DFP bit 6.
	 */
	if ((elf_hwcap & (1UL << 2)) &&
	    stfle(&facility_list_extended, 1) > 0) {
		if (facility_list_extended & (1ULL << (64 - 43)))
			elf_hwcap |= 1UL << 6;
	}

	/* Map the cpu machine type to a gcc -march= platform string. */
	switch (cpuinfo->cpu_id.machine) {
	case 0x9672:
#if !defined(CONFIG_64BIT)
	default:	/* Use "g5" as default for 31 bit kernels. */
#endif
		strcpy(elf_platform, "g5");
		break;
	case 0x2064:
	case 0x2066:
#if defined(CONFIG_64BIT)
	default:	/* Use "z900" as default for 64 bit kernels. */
#endif
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
		strcpy(elf_platform, "z9-109");
		break;
	}
}
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	*cmdline_p = COMMAND_LINE;
	*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* Pick the user-copy implementation; may be replaced again by
	 * setup_addressing_mode() below. */
	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	setup_ipl();
	setup_memory_end();
	setup_addressing_mode();
	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;

	/*
	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
	 */
	setup_hwcaps();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();

	/* Setup zfcpdump support */
	setup_zfcpdump(console_devno);
}
/*
 * Print version, identification and machine type of one cpu (and its
 * physical address when CONFIG_SMP is set).
 */
void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk(KERN_INFO "cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	static const char *hwcap_str[7] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp"
	};
	struct cpuinfo_S390 *cpuinfo;
	/* The iterator cookie is the cpu number + 1 (see c_start()). */
	unsigned long n = (unsigned long) v - 1;
	int i;

	s390_adjust_jiffies();
	preempt_disable();
	if (!n) {
		/* Header lines, printed once before the first cpu. */
		seq_printf(m, "vendor_id       : IBM/S390\n"
			   "# processors    : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
		seq_puts(m, "features\t: ");
		for (i = 0; i < 7; i++)
			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
				seq_printf(m, "%s ", hwcap_str[i]);
		seq_puts(m, "\n");
	}

	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}
  807. static void *c_start(struct seq_file *m, loff_t *pos)
  808. {
  809. return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
  810. }
  811. static void *c_next(struct seq_file *m, void *v, loff_t *pos)
  812. {
  813. ++*pos;
  814. return c_start(m, pos);
  815. }
/* seq_file iterator teardown: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}

/* /proc/cpuinfo operations. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};