/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
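
/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; cpu_init() below points each mode's banked SP at one
 * of these.
 */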
struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};
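
/*
 * Work out the CPU architecture version (CPU_ARCH_ARMv3..ARMv7) from the
 * main CPUID register; for the revised CPUID scheme this reads the Memory
 * Model Feature Register 0 to distinguish VMSAv7/PMSAv7 (ARMv7) from ARMv6.
 */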
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
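
/*
 * Decide whether the instruction cache can alias: on ARMv7 this selects the
 * L1 I-cache via CSSELR, reads CCSIDR, and flags aliasing when one cache
 * way (line size * number of sets) is larger than a page; on ARMv6 it tests
 * the corresponding bit in the cache type register.
 */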
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
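
/*
 * Classify the caches as VIVT, VIPT aliasing or VIPT non-aliasing (plus the
 * ASID-tagged / aliasing I-cache variants) from the cache type register,
 * and report the result at boot.
 */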
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
                        else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                } else if (cachetype & (1 << 23)) {
                        cacheid = CACHEID_VIPT_ALIASING;
                } else {
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                }
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

static void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}
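
/*
 * Pick the proc_info entry matching this CPU ID, hook up the per-CPU
 * function pointer tables (processor/TLB/user/cache ops on multi-CPU
 * builds), fill in the ELF hwcaps and platform strings, then probe the
 * cache geometry and run the CPU-specific init.
 */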
static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr cpsr_c, %1\n\t"
        "add r14, %0, %2\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %3\n\t"
        "add r14, %0, %4\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %5\n\t"
        "add r14, %0, %6\n\t"
        "mov sp, r14\n\t"
        "msr cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}
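
/*
 * Look up the machine record matching the machine number handed over by
 * the boot loader (in r1); if it is unknown, list the machines this kernel
 * was built for and stop.
 */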
static struct machine_desc * __init setup_machine(unsigned int nr)
{
        extern struct machine_desc __arch_info_begin[], __arch_info_end[];
        struct machine_desc *p;

        /*
         * locate machine in the list of supported machines.
         */
        for (p = __arch_info_begin; p < __arch_info_end; p++)
                if (nr == p->nr) {
                        printk("Machine: %s\n", p->name);
                        return p;
                }

        early_print("\n"
                "Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
                "Available machine support:\n\nID (hex)\tNAME\n", nr);

        for (p = __arch_info_begin; p < __arch_info_end; p++)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}
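
/*
 * Record one bank of RAM in the global meminfo table, page-aligning the
 * start address and trimming the size to match.  Used by the ATAG_MEM
 * parser and the "mem=" early parameter below.
 */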
static int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}
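
/*
 * Register the standard resource tree: one "System RAM" resource per
 * memblock region, with the kernel text and data as children, plus the
 * optional video RAM region and the legacy parallel-port I/O ranges the
 * machine record asks to reserve.
 */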
static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start = virt_to_phys(_text);
        kernel_code.end   = virt_to_phys(_etext - 1);
        kernel_data.start = virt_to_phys(_sdata);
        kernel_data.end   = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines  = 30,
        .orig_video_cols   = 80,
        .orig_video_mode   = 0,
        .orig_video_ega_bx = 0,
        .orig_video_isVGA  = 1,
        .orig_video_points = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
        pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE },
        { 0, ATAG_NONE }
};

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter.  The reserved memory is used by a dump-capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
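
/*
 * Turn any ATAG_MEM entries in the tag list into ATAG_NONE.  setup_arch()
 * calls this when the machine's fixup hook has already populated meminfo,
 * so that the boot loader's memory tags do not override it.
 */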
static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}
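
/*
 * Main architecture-specific boot entry point: identify the CPU and
 * machine, pull in the ATAG list from the boot loader (or fall back to the
 * built-in defaults), set up the command line, memory layout and paging,
 * and give the machine descriptor's early hooks their chance to run.
 */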
void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        init_tags.mem.start = PHYS_OFFSET;

        unwind_init();

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
                /*
                 * We still are executing with a minimal MMU mapping created
                 * with the presumption that the machine default for this
                 * is located in the first MB of RAM.  Anything else will
                 * fault and silently hang the kernel at this point.
                 */
                if (mdesc->boot_params < PHYS_OFFSET ||
                    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
                        printk(KERN_WARNING
                               "Default boot params at physical 0x%08lx out of reach\n",
                               mdesc->boot_params);
                } else
#endif
                {
                        tags = phys_to_virt(mdesc->boot_params);
                }
        }

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

#ifdef CONFIG_SMP
        if (is_smp())
                smp_init_cpus();
#endif
        reserve_crashkernel();

        cpu_init();
        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();

        if (mdesc->init_early)
                mdesc->init_early();
}
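
/*
 * Register a CPU device for each possible CPU so it appears under
 * /sys/devices/system/cpu; every CPU is marked hotpluggable here.
 */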
static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif
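
/*
 * Feature names printed on the /proc/cpuinfo "Features" line; the order
 * matches the HWCAP_* bit positions tested in c_show() below.
 */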
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        NULL
};
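
/*
 * seq_file callbacks backing /proc/cpuinfo: c_show() prints the processor
 * name, per-CPU BogoMIPS, the feature flags and the decoded CPUID fields,
 * followed by the hardware name, board revision and serial number.
 */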
static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = c_show
};