/*
 *  arch/s390/kernel/setup.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL_GPL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
unsigned int console_devno = -1;
unsigned int console_irq = -1;
unsigned long machine_flags = 0;

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end;

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */
#include <asm/setup.h>

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name  = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init(void)
{
        int addr = hard_smp_processor_id();

        /*
         * Store processor id in lowcore (used e.g. in timer_interrupt)
         */
        asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
        S390_lowcore.cpu_data.cpu_addr = addr;

        /*
         * Force FPU initialization:
         */
        clear_thread_flag(TIF_USEDFPU);
        clear_used_math();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
        enter_lazy_tlb(&init_mm, current);
}

/*
 * VM halt and poweroff setup routines
 */
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";
char vmpanic_cmd[128] = "";
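
/*
 * Copy at most n characters from src to dst, dropping any double
 * quotes. The result is not NUL terminated; the callers terminate
 * their fixed-size command buffers themselves.
 */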
static inline void strncpy_skip_quote(char *dst, char *src, int n)
{
        int sx, dx;

        dx = 0;
        for (sx = 0; src[sx] != 0; sx++) {
                if (src[sx] == '"')
                        continue;
                dst[dx++] = src[sx];
                if (dx >= n)
                        break;
        }
}

static int __init vmhalt_setup(char *str)
{
        strncpy_skip_quote(vmhalt_cmd, str, 127);
        vmhalt_cmd[127] = 0;
        return 1;
}

__setup("vmhalt=", vmhalt_setup);

static int __init vmpoff_setup(char *str)
{
        strncpy_skip_quote(vmpoff_cmd, str, 127);
        vmpoff_cmd[127] = 0;
        return 1;
}

__setup("vmpoff=", vmpoff_setup);

static int vmpanic_notify(struct notifier_block *self, unsigned long event,
                          void *data)
{
        if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
                cpcmd(vmpanic_cmd, NULL, 0, NULL);
        return NOTIFY_OK;
}

#define PANIC_PRI_VMPANIC 0

static struct notifier_block vmpanic_nb = {
        .notifier_call = vmpanic_notify,
        .priority = PANIC_PRI_VMPANIC
};

static int __init vmpanic_setup(char *str)
{
        static int register_done __initdata = 0;

        strncpy_skip_quote(vmpanic_cmd, str, 127);
        vmpanic_cmd[127] = 0;
        if (!register_done) {
                register_done = 1;
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &vmpanic_nb);
        }
        return 1;
}

__setup("vmpanic=", vmpanic_setup);

/*
 * condev= and conmode= setup parameter.
 */
static int __init condev_setup(char *str)
{
        int vdev;

        vdev = simple_strtoul(str, &str, 0);
        if (vdev >= 0 && vdev < 65536) {
                console_devno = vdev;
                console_irq = -1;
        }
        return 1;
}

__setup("condev=", condev_setup);

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
        if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
                SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
        if (strncmp(str, "3215", 5) == 0)
                SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
        if (strncmp(str, "3270", 5) == 0)
                SET_CONSOLE_3270;
#endif
        return 1;
}

__setup("conmode=", conmode_setup);
static void __init conmode_default(void)
{
        char query_buffer[1024];
        char *ptr;

        if (MACHINE_IS_VM) {
                cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
                console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
                ptr = strstr(query_buffer, "SUBCHANNEL =");
                console_irq = simple_strtoul(ptr + 13, NULL, 16);
                cpcmd("QUERY TERM", query_buffer, 1024, NULL);
                ptr = strstr(query_buffer, "CONMODE");
                /*
                 * Set the conmode to 3215 so that the device recognition
                 * will set the cu_type of the console to 3215. If the
                 * conmode is 3270 and we don't set it back then both
                 * 3215 and the 3270 driver will try to access the console
                 * device (3215 as console and 3270 as normal tty).
                 */
                cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
                if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                        return;
                }
                if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                } else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                }
        } else if (MACHINE_IS_P390) {
#if defined(CONFIG_TN3215_CONSOLE)
                SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                SET_CONSOLE_3270;
#endif
        } else {
#if defined(CONFIG_SCLP_CONSOLE)
                SET_CONSOLE_SCLP;
#endif
        }
}
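
/*
 * Reboot, halt and power off backends: on SMP kernels the SMP code
 * provides variants that stop the other CPUs first, on uniprocessor
 * kernels the simpler implementations below are used.
 */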
#ifdef CONFIG_SMP
extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
static void do_machine_restart_nonsmp(char * __unused)
{
        do_reipl();
}

static void do_machine_halt_nonsmp(void)
{
        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                __cpcmd(vmhalt_cmd, NULL, 0, NULL);
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

static void do_machine_power_off_nonsmp(void)
{
        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                __cpcmd(vmpoff_cmd, NULL, 0, NULL);
        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */
void machine_restart(char *command)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_restart(command);
}

void machine_halt(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_halt();
}

void machine_power_off(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
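
/*
 * Record a hole of page frames [start, end] in the per-zone hole
 * accounting (zholes_size[]), splitting it at the DMA zone boundary
 * if necessary.
 */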
static void __init
add_memory_hole(unsigned long start, unsigned long end)
{
        unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;

        if (end <= dma_pfn)
                zholes_size[ZONE_DMA] += end - start + 1;
        else if (start > dma_pfn)
                zholes_size[ZONE_NORMAL] += end - start + 1;
        else {
                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
                zholes_size[ZONE_NORMAL] += end - dma_pfn;
        }
}
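
/*
 * "mem=<size>" limits the amount of memory the kernel uses;
 * memory_end is evaluated in setup_memory_end() below.
 */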
static int __init early_parse_mem(char *p)
{
        memory_end = memparse(p, &p);
        return 0;
}
early_param("mem", early_parse_mem);

/*
 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
 */
static int __init early_parse_ipldelay(char *p)
{
        unsigned long delay = 0;

        delay = simple_strtoul(p, &p, 0);

        switch (*p) {
        case 's':
        case 'S':
                delay *= 1000000;
                break;
        case 'm':
        case 'M':
                delay *= 60 * 1000000;
        }

        /* now wait for the requested amount of time */
        udelay(delay);

        return 0;
}
early_param("ipldelay", early_parse_ipldelay);
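
/*
 * Allocate and populate the lowcore (prefix page) for the boot cpu:
 * new PSWs for the interruption classes, the kernel, async and panic
 * stacks, and - on 31 bit machines with an IEEE fpu - the extended
 * save area. Finally point the prefix register at the new lowcore.
 */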
static void __init
setup_lowcore(void)
{
        struct _lowcore *lc;
        int lc_pages;

        /*
         * Setup lowcore for boot cpu
         */
        lc_pages = sizeof(void *) == 8 ? 2 : 1;
        lc = (struct _lowcore *)
                __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
        memset(lc, 0, lc_pages * PAGE_SIZE);
        lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = PSW_KERNEL_BITS;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
        lc->program_new_psw.mask = PSW_KERNEL_BITS;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
        lc->mcck_new_psw.mask =
                PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = PSW_KERNEL_BITS;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->ipl_device = S390_lowcore.ipl_device;
        lc->jiffy_timer = -1LL;
        lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
        lc->async_stack = (unsigned long)
                __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
        lc->panic_stack = (unsigned long)
                __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
        lc->current_task = (unsigned long) init_thread_union.thread_info.task;
        lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                lc->extended_save_area_addr = (__u32)
                        __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
                /* enable extended save area */
                __ctl_set_bit(14, 29);
        }
#endif
        set_prefix((u32)(unsigned long) lc);
}
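
/*
 * Describe the kernel image and all known memory chunks as resources
 * so they show up in /proc/iomem; the kernel code and data resources
 * end up nested inside the RAM chunk that contains them.
 */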
static void __init
setup_resources(void)
{
        struct resource *res;
        int i;

        code_resource.start = (unsigned long) &_text;
        code_resource.end = (unsigned long) &_etext - 1;
        data_resource.start = (unsigned long) &_etext;
        data_resource.end = (unsigned long) &_edata - 1;

        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                res = alloc_bootmem_low(sizeof(struct resource));
                res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
                switch (memory_chunk[i].type) {
                case CHUNK_READ_WRITE:
                        res->name = "System RAM";
                        break;
                case CHUNK_READ_ONLY:
                        res->name = "System ROM";
                        res->flags |= IORESOURCE_READONLY;
                        break;
                default:
                        res->name = "reserved";
                }
                res->start = memory_chunk[i].addr;
                res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
                request_resource(&iomem_resource, res);
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
}
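
/*
 * Determine the usable end of memory: clamp memory_end to the "mem="
 * limit and to the highest mappable address (max_phys), trim or drop
 * memory chunks beyond that point, and report memory that is present
 * but will not be used.
 */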
static void __init setup_memory_end(void)
{
        unsigned long real_size, memory_size;
        unsigned long max_mem, max_phys;
        int i;

        memory_size = real_size = 0;
        max_phys = VMALLOC_END - VMALLOC_MIN_SIZE;
        memory_end &= PAGE_MASK;

        max_mem = memory_end ? min(max_phys, memory_end) : max_phys;

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                struct mem_chunk *chunk = &memory_chunk[i];

                real_size = max(real_size, chunk->addr + chunk->size);
                if (chunk->addr >= max_mem) {
                        memset(chunk, 0, sizeof(*chunk));
                        continue;
                }
                if (chunk->addr + chunk->size > max_mem)
                        chunk->size = max_mem - chunk->addr;
                memory_size = max(memory_size, chunk->addr + chunk->size);
        }
        if (!memory_end)
                memory_end = memory_size;
        if (real_size > memory_end)
                printk("More memory detected than supported. Unused: %luk\n",
                       (real_size - memory_end) >> 10);
}
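
/*
 * Set up the bootmem allocator: initialize storage keys for the pages
 * the kernel already occupies, move the initrd out of the way of the
 * bootmem bitmap if necessary, register all read-write memory chunks
 * as free, account the holes between them, and finally reserve the
 * bitmap and the initrd themselves.
 */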
static void __init
setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long start_pfn, end_pfn, init_pfn;
        unsigned long last_rw_end;
        int i;

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(&_end));
        end_pfn = max_pfn = PFN_DOWN(memory_end);

        /* Initialize storage key for kernel pages */
        for (init_pfn = 0; init_pfn < start_pfn; init_pfn++)
                page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Move the initrd in case the bitmap of the bootmem allocator
         * would overwrite it.
         */
        if (INITRD_START && INITRD_SIZE) {
                unsigned long bmap_size;
                unsigned long start;

                bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
                bmap_size = PFN_PHYS(bmap_size);
                if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
                        start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

                        if (start + INITRD_SIZE > memory_end) {
                                printk("initrd extends beyond end of memory "
                                       "(0x%08lx > 0x%08lx)\n"
                                       "disabling initrd\n",
                                       start + INITRD_SIZE, memory_end);
                                INITRD_START = INITRD_SIZE = 0;
                        } else {
                                printk("Moving initrd (0x%08lx -> 0x%08lx, "
                                       "size: %ld)\n",
                                       INITRD_START, start, INITRD_SIZE);
                                memmove((void *) start, (void *) INITRD_START,
                                        INITRD_SIZE);
                                INITRD_START = start;
                        }
                }
        }
#endif
        /*
         * Initialize the boot-time allocator
         */
        bootmap_size = init_bootmem(start_pfn, end_pfn);

        /*
         * Register RAM areas with the bootmem allocator.
         */
        last_rw_end = start_pfn;

        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                unsigned long start_chunk, end_chunk;

                if (memory_chunk[i].type != CHUNK_READ_WRITE)
                        continue;
                start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
                start_chunk >>= PAGE_SHIFT;
                end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
                end_chunk >>= PAGE_SHIFT;
                if (start_chunk < start_pfn)
                        start_chunk = start_pfn;
                if (end_chunk > end_pfn)
                        end_chunk = end_pfn;
                if (start_chunk < end_chunk) {
                        /* Initialize storage key for RAM pages */
                        for (init_pfn = start_chunk; init_pfn < end_chunk;
                             init_pfn++)
                                page_set_storage_key(init_pfn << PAGE_SHIFT,
                                                     PAGE_DEFAULT_KEY);
                        free_bootmem(start_chunk << PAGE_SHIFT,
                                     (end_chunk - start_chunk) << PAGE_SHIFT);
                        if (last_rw_end < start_chunk)
                                add_memory_hole(last_rw_end, start_chunk - 1);
                        last_rw_end = end_chunk;
                }
        }

        psw_set_key(PAGE_DEFAULT_KEY);

        if (last_rw_end < end_pfn - 1)
                add_memory_hole(last_rw_end, end_pfn - 1);

        /*
         * Reserve the bootmem bitmap itself as well. We do this in two
         * steps (first step was init_bootmem()) because this catches
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
        if (INITRD_START && INITRD_SIZE) {
                if (INITRD_START + INITRD_SIZE <= memory_end) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE);
                        initrd_start = INITRD_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               initrd_start + INITRD_SIZE, memory_end);
                        initrd_start = initrd_end = 0;
                }
        }
#endif
}
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init
setup_arch(char **cmdline_p)
{
        /*
         * print what head.S has found out about the machine
         */
#ifndef CONFIG_64BIT
        printk((MACHINE_IS_VM) ?
               "We are running under VM (31 bit mode)\n" :
               "We are running native (31 bit mode)\n");
        printk((MACHINE_HAS_IEEE) ?
               "This machine has an IEEE fpu\n" :
               "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
        printk((MACHINE_IS_VM) ?
               "We are running under VM (64 bit mode)\n" :
               "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

        /* Save unparsed command line copy for /proc/cmdline */
        strlcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);

        *cmdline_p = COMMAND_LINE;
        *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

        ROOT_DEV = Root_RAM0;

        init_mm.start_code = PAGE_OFFSET;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        if (MACHINE_HAS_MVCOS)
                memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
        else
                memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

        parse_early_param();

        setup_memory_end();
        setup_memory();
        setup_resources();
        setup_lowcore();

        cpu_init();
        __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
        smp_setup_cpu_possible_map();

        /*
         * Create kernel page tables and switch to virtual addressing.
         */
        paging_init();

        /* Setup default console */
        conmode_default();
}
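
/*
 * Print version, identification and machine type of one cpu, as
 * stored in its cpu_data (filled in by cpu_init() via stidp).
 */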
void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
        printk("cpu %d "
#ifdef CONFIG_SMP
               "phys_idx=%d "
#endif
               "vers=%02X ident=%06X machine=%04X unused=%04X\n",
               cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
               cpuinfo->cpu_addr,
#endif
               cpuinfo->cpu_id.version,
               cpuinfo->cpu_id.ident,
               cpuinfo->cpu_id.machine,
               cpuinfo->cpu_id.unused);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_S390 *cpuinfo;
        unsigned long n = (unsigned long) v - 1;

        preempt_disable();
        if (!n) {
                seq_printf(m, "vendor_id : IBM/S390\n"
                           "# processors : %i\n"
                           "bogomips per cpu: %lu.%02lu\n",
                           num_online_cpus(), loops_per_jiffy/(500000/HZ),
                           (loops_per_jiffy/(5000/HZ))%100);
        }
        if (cpu_online(n)) {
#ifdef CONFIG_SMP
                if (smp_processor_id() == n)
                        cpuinfo = &S390_lowcore.cpu_data;
                else
                        cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
                cpuinfo = &S390_lowcore.cpu_data;
#endif
                seq_printf(m, "processor %li: "
                           "version = %02X, "
                           "identification = %06X, "
                           "machine = %04X\n",
                           n, cpuinfo->cpu_id.version,
                           cpuinfo->cpu_id.ident,
                           cpuinfo->cpu_id.machine);
        }
        preempt_enable();
        return 0;
}
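
/*
 * seq_file iterator callbacks for /proc/cpuinfo: walk the cpu numbers
 * 0..NR_CPUS-1, offset by one so that the first position is non-NULL.
 */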
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = show_cpuinfo,
};