setup.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
/*
 * Despite its name, this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
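
/*
 * add_memory_region() - record a physical memory range in boot_mem_map.
 * Ranges that overlap or touch an existing entry of the same type are
 * merged into that entry; otherwise a new entry is appended, up to
 * BOOT_MEM_MAP_MAX entries.
 */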
void __init add_memory_region(phys_t start, phys_t size, long type)
{
        int x = boot_mem_map.nr_map;
        int i;

        /* Sanity check */
        if (start + size < start) {
                pr_warning("Trying to add an invalid memory region, skipped\n");
                return;
        }

        /*
         * Try to merge with existing entry, if any.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct boot_mem_map_entry *entry = boot_mem_map.map + i;
                unsigned long top;

                if (entry->type != type)
                        continue;

                if (start + size < entry->addr)
                        continue;                       /* no overlap */

                if (entry->addr + entry->size < start)
                        continue;                       /* no overlap */

                top = max(entry->addr + entry->size, start + size);
                entry->addr = min(entry->addr, start);
                entry->size = top - entry->addr;

                return;
        }

        if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
                pr_err("Ooops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}
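
/*
 * print_memory_map() - dump the current boot_mem_map entries to the kernel
 * log, one line per region with its size, address and type.
 */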
static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk(KERN_CONT "(usable)\n");
                        break;
                case BOOT_MEM_INIT_RAM:
                        printk(KERN_CONT "(usable after init)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk(KERN_CONT "(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
                default:
                        printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD
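
/*
 * "rd_start=" and "rd_size=" are early parameters a bootloader can use to
 * describe the initrd location: rd_start sets the start address and
 * rd_size adds the ramdisk size, so that initrd_end ends up at
 * rd_start + rd_size.
 */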
static int __init rd_start_early(char *p)
{
        unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
        /* Guess if the sign extension was forgotten by bootloader */
        if (start < XKPHYS)
                start = (int)start;
#endif
        initrd_start = start;
        initrd_end += start;

        return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
        initrd_end += memparse(p, &p);

        return 0;
}
early_param("rd_size", rd_size_early);

/* It returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
        unsigned long end;

        /*
         * Board specific code or command line parser should have
         * already set up initrd_start and initrd_end. In these cases
         * perform sanity checks and use them if all looks good.
         */
        if (!initrd_start || initrd_end <= initrd_start)
                goto disable;

        if (initrd_start & ~PAGE_MASK) {
                pr_err("initrd start must be page aligned\n");
                goto disable;
        }
        if (initrd_start < PAGE_OFFSET) {
                pr_err("initrd start < PAGE_OFFSET\n");
                goto disable;
        }

        /*
         * Sanitize initrd addresses. For example firmware
         * can't guess if they need to pass them as 64-bit
         * values if the kernel has been built in pure
         * 32-bit. We also need to switch from KSEG0 to XKPHYS
         * addresses now, so the code can safely use __pa().
         */
        end = __pa(initrd_end);
        initrd_end = (unsigned long)__va(end);
        initrd_start = (unsigned long)__va(__pa(initrd_start));

        ROOT_DEV = Root_RAM0;
        return PFN_UP(end);

disable:
        initrd_start = 0;
        initrd_end = 0;
        return 0;
}
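
/*
 * finalize_initrd() - check that the initrd fits below max_low_pfn and, if
 * so, reserve its pages with the bootmem allocator; otherwise the initrd is
 * disabled.
 */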
static void __init finalize_initrd(void)
{
        unsigned long size = initrd_end - initrd_start;

        if (size == 0) {
                printk(KERN_INFO "Initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                printk(KERN_ERR "Initrd extends beyond end of memory");
                goto disable;
        }

        reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
                initrd_start, size);
        return;
disable:
        printk(KERN_CONT " - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
        return 0;
}

#define finalize_initrd()       do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#ifdef CONFIG_SGI_IP27

static void __init bootmem_init(void)
{
        init_initrd();
        finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */
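
/*
 * The generic bootmem_init() below works from boot_mem_map: it computes
 * min_low_pfn/max_low_pfn, splits off highmem above HIGHMEM_START, places
 * the bootmem bitmap above the kernel image and initrd, registers the
 * usable low RAM with memblock and the bootmem allocator, and finally
 * reserves the bitmap and the initrd.
 */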
static void __init bootmem_init(void)
{
        unsigned long reserved_end;
        unsigned long mapstart = ~0UL;
        unsigned long bootmap_size;
        int i;

        /*
         * Init any data related to initrd. It's a nop if INITRD is
         * not selected. Once that is done we can determine the lower
         * bound of usable memory.
         */
        reserved_end = max(init_initrd(),
                           (unsigned long) PFN_UP(__pa_symbol(&_end)));

        /*
         * max_low_pfn is not a number of pages. The number of pages
         * of the system is given by 'max_low_pfn - min_low_pfn'.
         */
        min_low_pfn = ~0UL;
        max_low_pfn = 0;

        /*
         * Find the highest page frame number we have available.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                if (end > max_low_pfn)
                        max_low_pfn = end;
                if (start < min_low_pfn)
                        min_low_pfn = start;
                if (end <= reserved_end)
                        continue;
                if (start >= mapstart)
                        continue;
                mapstart = max(reserved_end, start);
        }

        if (min_low_pfn >= max_low_pfn)
                panic("Incorrect memory mapping !!!");
        if (min_low_pfn > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                        (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                        min_low_pfn - ARCH_PFN_OFFSET);
        } else if (min_low_pfn < ARCH_PFN_OFFSET) {
                pr_info("%lu free pages won't be used\n",
                        ARCH_PFN_OFFSET - min_low_pfn);
        }
        min_low_pfn = ARCH_PFN_OFFSET;

        /*
         * Determine low and high memory ranges
         */
        max_pfn = max_low_pfn;

        if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
                highstart_pfn = PFN_DOWN(HIGHMEM_START);
                highend_pfn = max_low_pfn;
#endif
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }

        /*
         * Initialize the boot-time allocator with low memory only.
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
                                         min_low_pfn, max_low_pfn);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                if (start <= min_low_pfn)
                        start = min_low_pfn;
                if (start >= end)
                        continue;

#ifndef CONFIG_HIGHMEM
                if (end > max_low_pfn)
                        end = max_low_pfn;

                /*
                 * ... finally, is the area going away?
                 */
                if (end <= start)
                        continue;
#endif

                memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
        }

        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end, size;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                /*
                 * Reserve usable memory.
                 */
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        break;
                case BOOT_MEM_INIT_RAM:
                        memory_present(0, start, end);
                        continue;
                default:
                        /* Not usable memory */
                        continue;
                }

                /*
                 * The start address of usable memory is rounded up and
                 * the end of the usable range is rounded down.
                 */
                if (start >= max_low_pfn)
                        continue;
                if (start < reserved_end)
                        start = reserved_end;
                if (end > max_low_pfn)
                        end = max_low_pfn;

                /*
                 * ... finally, is the area going away?
                 */
                if (end <= start)
                        continue;
                size = end - start;

                /* Register lowmem ranges */
                free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
                memory_present(0, start, end);
        }

        /*
         * Reserve the bootmap memory.
         */
        reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

        /*
         * Reserve initrd memory if needed.
         */
        finalize_initrd();
}

#endif  /* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record
 *    detected memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *       This was rather impractical because it meant plat_mem_setup had to
 *       get away without any kind of memory allocator. To keep old code from
 *       breaking, plat_setup was just renamed to plat_mem_setup and a second
 *       platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;
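
/*
 * "mem=<size>[@<start>]" overrides the firmware-provided memory map: the
 * first use clears boot_mem_map, and each use adds one BOOT_MEM_RAM region
 * (starting at physical address 0 when no "@<start>" is given).
 */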
static int __init early_parse_mem(char *p)
{
        unsigned long start, size;

        /*
         * If a user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                boot_mem_map.nr_map = 0;
                usermem = 1;
        }
        start = 0;
        size = memparse(p, &p);
        if (*p == '@')
                start = memparse(p + 1, &p);

        add_memory_region(start, size, BOOT_MEM_RAM);
        return 0;
}
early_param("mem", early_parse_mem);

#ifdef CONFIG_PROC_VMCORE
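/*
 * "elfcorehdr=" is passed by kexec to a kdump capture kernel and gives the
 * physical address of the ELF core header left behind by the crashed kernel.
 */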
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;

static int __init early_parse_elfcorehdr(char *p)
{
        int i;

        setup_elfcorehdr = memparse(p, &p);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start = boot_mem_map.map[i].addr;
                unsigned long end = (boot_mem_map.map[i].addr +
                                     boot_mem_map.map[i].size);
                if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                        /*
                         * Reserve from the elf core header to the end of
                         * the memory segment, that should all be kdump
                         * reserved memory.
                         */
                        setup_elfcorehdr_size = end - setup_elfcorehdr;
                        break;
                }
        }
        /*
         * If we don't find it in the memory map, then we shouldn't
         * have to worry about it, as the new kernel won't use it.
         */
        return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif
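
/*
 * arch_mem_addpart() - add a kernel section [mem, end) to boot_mem_map,
 * unless its start already falls inside an existing entry.
 */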
static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
{
        phys_t size;
        int i;

        size = end - mem;
        if (!size)
                return;

        /* Make sure it is in the boot_mem_map */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                if (mem >= boot_mem_map.map[i].addr &&
                    mem < (boot_mem_map.map[i].addr +
                           boot_mem_map.map[i].size))
                        return;
        }
        add_memory_region(mem, size, type);
}

static void __init arch_mem_init(char **cmdline_p)
{
        extern void plat_mem_setup(void);

        /* call board setup routine */
        plat_mem_setup();

        /*
         * Make sure all kernel memory is in the maps.  The "UP" and
         * "DOWN" are opposite for initdata since if it crosses over
         * into another memory section you don't want that to be
         * freed when the initdata is freed.
         */
        arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
                         PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
                         BOOT_MEM_RAM);
        arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
                         PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
                         BOOT_MEM_INIT_RAM);

        pr_info("Determined physical RAM map:\n");
        print_memory_map();

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if (builtin_cmdline[0]) {
                strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
                strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
        }
        strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
#else
        strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_early_param();

        if (usermem) {
                pr_info("User-defined physical RAM map:\n");
                print_memory_map();
        }

        bootmem_init();
#ifdef CONFIG_PROC_VMCORE
        if (setup_elfcorehdr && setup_elfcorehdr_size) {
                printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
                       setup_elfcorehdr, setup_elfcorehdr_size);
                reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
                                BOOTMEM_DEFAULT);
        }
#endif
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start,
                                crashk_res.end - crashk_res.start + 1,
                                BOOTMEM_DEFAULT);
#endif
        device_tree_init();
        sparse_init();
        plat_swiotlb_setup();
        paging_init();
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}
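
/*
 * mips_parse_crashkernel() - parse the "crashkernel=" argument from the
 * boot command line and record the requested region in crashk_res.
 */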
static void __init mips_parse_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
                return;

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
        int ret;

        ret = request_resource(res, &crashk_res);
        if (!ret)
                pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
                        (unsigned long)((crashk_res.end -
                                         crashk_res.start + 1) >> 20),
                        (unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
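
/*
 * resource_init() - register a "System RAM" or "reserved" resource for each
 * boot_mem_map entry below HIGHMEM_START, and claim the kernel code, kernel
 * data and crashkernel ranges within them.
 */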
static void __init resource_init(void)
{
        int i;

        if (UNCAC_BASE != IO_BASE)
                return;

        code_resource.start = __pa_symbol(&_text);
        code_resource.end = __pa_symbol(&_etext) - 1;
        data_resource.start = __pa_symbol(&_etext);
        data_resource.end = __pa_symbol(&_edata) - 1;

        /*
         * Request address space for all standard RAM.
         */
        mips_parse_crashkernel();

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= HIGHMEM_START)
                        continue;
                if (end >= HIGHMEM_START)
                        end = HIGHMEM_START - 1;

                res = alloc_bootmem(sizeof(struct resource));
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }

                res->start = start;
                res->end = end;

                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);

                /*
                 * We don't know which RAM region contains kernel data,
                 * so we try it repeatedly and let the resource manager
                 * test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
                request_crashkernel(res);
        }
}
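
/*
 * setup_arch() - the MIPS architecture entry point called from start_kernel():
 * probes the CPU, runs firmware/prom and early-printk setup, picks a console,
 * initializes memory management, registers resources, and sets up SMP and
 * the caches.
 */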
void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        prom_init();

#ifdef CONFIG_EARLY_PRINTK
        setup_early_printk();
#endif
        cpu_report();
        check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        arch_mem_init(cmdline_p);

        resource_init();
        plat_smp_setup();

        cpu_cache_init();
}
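
/*
 * kernelsp[] holds each CPU's current kernel stack pointer; fw_arg0..fw_arg3
 * preserve the argument registers handed over by the firmware/bootloader.
 */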
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
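/*
 * Create the "mips" directory in debugfs; other MIPS debug code hangs its
 * entries off mips_debugfs_dir.
 */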
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
        struct dentry *d;

        d = debugfs_create_dir("mips", NULL);
        if (!d)
                return -ENOMEM;
        mips_debugfs_dir = d;
        return 0;
}
arch_initcall(debugfs_mips);
#endif