/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
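
/*
 * The union above is an endianness probe: 'l' sits in the lowest-addressed
 * byte and 'b' in the highest, so taking the low byte of the word yields
 * 'l' on a little-endian CPU and 'b' on a big-endian one.  setup_processor()
 * appends this character to the machine and ELF platform names.
 */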
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};
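
/*
 * cpu_architecture() maps the CP15 main ID register onto one of the
 * CPU_ARCH_* constants that index proc_arch[] above.  A core using the
 * "revised CPUID" layout (bits [19:16] == 0xf, as on ARMv7 parts, for
 * example a Cortex-A9 ID such as 0x410fc090 - an illustrative value, not
 * taken from this file) is classified by reading MMFR0 and checking the
 * VMSA/PMSA support fields.
 */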
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
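
/*
 * On ARMv7, cpu_has_aliasing_icache() selects the level 1 instruction
 * cache via CSSELR and reads its geometry back from CCSIDR: if one way
 * (line size * number of sets) is larger than PAGE_SIZE, the I-cache can
 * alias.  On the ARMv6 register format the same information is taken
 * directly from bit 11 of the cache type register.
 */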
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
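
/*
 * cacheid_init() decodes the cache type register.  A format field of 0x4
 * (bits [31:29]) indicates the ARMv7 register layout, in which an L1
 * instruction-cache policy of 0b01 (bits [15:14]) marks the I-cache as
 * ASID-tagged; with the older ARMv6 layout, a set bit 23 is taken to mean
 * an aliasing VIPT data cache.  Everything before ARMv6 is treated as VIVT.
 */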
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
                        else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                } else if (cachetype & (1 << 23)) {
                        cacheid = CACHEID_VIPT_ALIASING;
                } else {
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                }
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
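        /*
         * The msr/add/mov sequence below switches briefly into IRQ, ABT
         * and UND mode in turn, points each mode's banked stack pointer
         * at the matching three-word area in 'stk', and finally drops
         * back into SVC mode with interrupts still disabled.
         */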
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at %#lx\n", start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Check whether this memory region has non-zero size.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
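/*
 * For instance, "mem=64M@0x20000000" registers a single 64MB bank at
 * physical address 0x20000000 and discards whatever the boot tags set up
 * (the values here are purely illustrative).
 */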
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start = virt_to_phys(_text);
        kernel_code.end   = virt_to_phys(_etext - 1);
        kernel_data.start = virt_to_phys(_sdata);
        kernel_data.end   = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
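/*
 * A minimal, purely illustrative tag list as a boot loader might build it:
 * an ATAG_CORE header, one ATAG_MEM describing RAM, and the terminating
 * zero-size ATAG_NONE:
 *
 *      { ATAG_CORE, flags, pagesize, rootdev }
 *      { ATAG_MEM,  size,  start }
 *      { ATAG_NONE }
 *
 * Each entry starts with a tag_header giving its size in words and its tag
 * value; parse_tags() below walks the list with tag_next().
 */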
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
        pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};
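
/*
 * init_tags describes one default memory bank of MEM_SIZE bytes starting at
 * PHYS_OFFSET; setup_arch() falls back to it when neither the boot loader
 * nor the machine record supplies a valid tag list.
 */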
static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
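/*
 * A typical (illustrative) use is "crashkernel=64M@32M", asking for a 64MB
 * reservation starting 32MB into RAM for the capture kernel.
 */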
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}
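
/*
 * Boot-time flow, in the order implemented below: identify the CPU and
 * machine, locate the ATAG list (or the converted param_struct), parse the
 * tags and command line, initialise memblock and paging, claim the standard
 * resources, and finally give the machine record its early init hook.
 */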
void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        unwind_init();

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

#ifdef CONFIG_SMP
        if (is_smp())
                smp_init_cpus();
#endif
        reserve_crashkernel();

        cpu_init();
        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();

        if (mdesc->init_early)
                mdesc->init_early();
}

static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        NULL
};
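
/*
 * The order of the strings above mirrors the HWCAP_* bit assignments in
 * <asm/hwcap.h> (HWCAP_SWP is bit 0, HWCAP_HALF bit 1, and so on), since
 * c_show() below prints hwcap_str[i] whenever bit i of elf_hwcap is set.
 */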
static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}
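
/*
 * cpuinfo_op below describes a single-element sequence: c_start() returns a
 * non-NULL token only for position 0 and c_next() always ends the iteration,
 * so the whole of /proc/cpuinfo is emitted by one c_show() call.
 */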
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};