/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to
 * use when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
/*
 * "clflush_cache_range()" needs to know what processor-dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;
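/*
 * Example: with a stride shift of 5, the flush loops advance 1 << 5 = 32
 * bytes per flush-cache instruction.  A stride smaller than the true line
 * size is safe, merely redundant, which is why 32 bytes is the fallback.
 */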
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).
 * It specifies the address bits that must be 0 for two buffers to be mergeable
 * by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge
 * physically discontiguous buffers, so we set the merge_mask to ~0UL, which
 * corresponds to an I/O MMU page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
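/*
 * Example: an I/O MMU with a 4 KB page size would set this to 0xfff.  Two
 * scatterlist entries can then be merged only when the first ends, and the
 * second starts, on a 4 KB boundary, i.e. when
 * (addr & ia64_max_iommu_merge_mask) == 0 at the junction.
 */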
/*
 * The table is terminated by a special end-of-memory marker, which occupies
 * the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
/*
 * Similar to "filter_rsvd_memory()", but the reserved memory ranges
 * are not filtered out.
 */
int __init
filter_memory(unsigned long start, unsigned long end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int);

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	func = arg;
	if (start < end)
		call_pernode_memory(__pa(start), end - start, func);
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}
/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_edata) - 1;
	bss_resource.start  = ia64_tpa(__bss_start);
	bss_resource.end    = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource,
				       &bss_resource);

	return 0;
}

__initcall(register_memory);
#ifdef CONFIG_KEXEC

/*
 * This function checks if the reserved crashkernel is allowed on the specific
 * IA64 machine flavour.  Machines without an IO TLB use swiotlb and require
 * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
 * lib/swiotlb.c.  The hpzx1 architecture has an IO TLB but cannot use that
 * in kdump case.  See the comment in sba_init() in sba_iommu.c.
 *
 * So, the only machvec that really supports loading the kdump kernel
 * over 4 GB is "sn2".
 */
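/*
 * Example: the region is requested with the "crashkernel=size[@offset]"
 * boot parameter (e.g. "crashkernel=256M"); when no offset is given,
 * setup_crashkernel() below picks one out of the reserved-region map.
 */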
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
	if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
		return 1;
	else
		return pbase < (1UL << 32);
}

static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, total,
				&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			sort_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
						      rsvd_region, *n);
		}
		if (!check_crashkernel_memory(base, size)) {
			pr_warning("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}
		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(size >> 20),
			       (unsigned long)(base >> 20),
			       (unsigned long)(total >> 20));
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see arch/ia64/include/asm/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

	n += paravirt_reserve_memory(&rsvd_region[n]);

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_PROC_VMCORE
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}
static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their
 * hardware.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	return (earlycons) ? 0 : -1;
}
static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
	unsigned long length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		elfcorehdr_addr = ELFCORE_ADDR_MAX;
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}

#endif /* CONFIG_PROC_VMCORE */
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	paravirt_arch_setup_early();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called, to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
		32 : cpus_weight(early_cpu_possible_map)),
		additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */
	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

#ifdef CONFIG_ITANIUM
	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
	{
		u64 num_phys_stacked;

		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
	}
#endif

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

	paravirt_banner();
	paravirt_arch_setup_console(cmdline_p);

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (paravirt_arch_setup_nomca())
		nomca = 1;
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
#ifndef CONFIG_IA64_HP_SIM
	check_sal_cache_flush();
#endif
	paging_init();
}
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}

	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}
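	/*
	 * Example: a CPU advertising bits 0 and 2 plus an undocumented bit 4
	 * yields "branchlong, 16-byte atomic ops, 0x10"; with no bits set,
	 * the string stays "standard".
	 */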
	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->socket_id != -1)
		seq_printf(m, "physical id: %u\n", c->socket_id);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __func__);
	return "Unknown";
}
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/* The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);

	if (c->threads_per_core > smp_num_siblings)
		smp_num_siblings = c->threads_per_core;
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
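	/*
	 * Mask off the bits this CPU does not implement: everything above
	 * impl_va_msb except the three region bits (63:61) for virtual
	 * addresses, and everything above phys_addr_size except the
	 * uncached bit (63) for physical addresses.
	 */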
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

void __init
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}
/*
 * Do the following calculations:
 *
 * 1. the max. cache line size.
 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
 */
static void __cpuinit
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified) = 2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction) = 1 */
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __func__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
#ifdef CONFIG_SMP
	/*
	 * insert boot cpu into sibling and core maps
	 * (must be done after per_cpu area is setup)
	 */
	if (smp_processor_id() == 0) {
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, cpu_core_map[0]);
	} else {
		/*
		 * Set ar.k3 so that assembly code in MCA handler can compute
		 * physical addresses of per cpu variables with a simple:
		 *	phys = ar.k3 + &per_cpu_var
		 * and the alt-dtlb-miss handler can set per-cpu mapping into
		 * the TLB when needed.  head.S already did this for cpu0.
		 */
		ia64_set_kr(IA64_KR_PER_CPU_DATA,
			    ia64_tpa(cpu_data) - (long) __per_cpu_start);
	}
#endif
	get_cache_info();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif
	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we have created
	 * the first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);

	/* Clear any pending interrupts left by SAL/EFI */
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

#ifdef CONFIG_SMP
	normal_xtp();
#endif
	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
		setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
	} else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
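	/*
	 * Lower the system-wide limit to this CPU's value; the cmpxchg loop
	 * keeps the minimum across all CPUs even if several are brought up
	 * concurrently.
	 */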
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}
void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);