common.c

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/cpumask.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#include <asm/uv/uv.h>
#endif
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>

#include "cpu.h"

#ifdef CONFIG_X86_64

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_callin_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_initialized_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_initialized;
cpumask_t cpu_sibling_setup_map;

#endif /* CONFIG_X86_32 */

static struct cpu_dev *this_cpu __cpuinitdata;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too
         * IRET will check the segment types  kkeil 2000/10/28
         * Also sysret mandates a special GDT layout
         *
         * The TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?)
         */
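        /*
         * Each initializer below is the raw 8-byte segment descriptor split
         * into two 32-bit words.  Decoding 0x0000ffff/0x00cf9b00, for
         * example: limit[15:0] = 0xffff, base = 0, access byte 0x9b
         * (present, DPL 0, execute/read code, accessed), flags 0xc
         * (4K granularity, 32-bit default) with limit[19:16] = 0xf,
         * i.e. a flat 4GB kernel code segment.
         */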
        [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
        [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
#else
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code segments and data segments have fixed 64k limits,
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time. All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

        [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
        [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_FXSR);
        setup_clear_cpu_cap(X86_FEATURE_XMM);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_SEP);
        return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        /*
         * Cyrix and IDT cpus allow disabling of CPUID
         * so the code below may return different results
         * when it is executed before and after enabling
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
        asm volatile ("pushfl\n\t"
                      "pushfl\n\t"
                      "popl %0\n\t"
                      "movl %0,%1\n\t"
                      "xorl %2,%0\n\t"
                      "pushl %0\n\t"
                      "popfl\n\t"
                      "pushfl\n\t"
                      "popl %0\n\t"
                      "popfl\n\t"
                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}
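
/*
 * The probe above toggles the requested bit in EFLAGS and checks whether
 * the change sticks.  have_cpuid_p() uses it with X86_EFLAGS_ID (bit 21):
 * a CPU supports the CPUID instruction if and only if software can toggle
 * the ID flag.
 */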
/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}
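
/*
 * Intel's Pentium III introduced a unique "processor serial number" (PSN),
 * advertised through the CPUID PN feature bit.  Per Intel's docs, setting
 * bit 21 (0x200000) of MSR_IA32_BBL_CR_CTL disables the PSN until the next
 * reset; the helper below does that by default, and the PSN is kept only
 * if "serialnumber" is on the command line.
 */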
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
                /* Disable processor serial number */
                unsigned long lo, hi;
                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_cpu_cap(c, X86_FEATURE_PN);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

#else
static inline int flag_is_changeable_p(u32 flag)
{
        return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
        return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the model
 * name; in particular, it isn't used if CPUID levels 0x80000002..4 are
 * supported.
 */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
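
/*
 * On 32-bit, per-CPU variables are addressed through %fs, so the selector
 * has to be reloaded once the new GDT (with its __KERNEL_PERCPU entry) is
 * live.  64-bit instead reaches per-CPU data through the MSR-based %gs
 * base, which is set up separately (see load_gs_base() in cpu_init()).
 */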
/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
#ifdef CONFIG_X86_32
        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
#endif
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        display_cacheinfo(c);
#else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
        .c_x86_vendor = X86_VENDOR_UNKNOWN,
};
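
/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte
 * processor brand string in EAX, EBX, ECX and EDX, which is why the three
 * cpuid() calls below simply fill x86_model_id back to back.
 */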
static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (c->extended_cpuid_level < 0x80000004)
                return;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }
}
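
/*
 * display_cacheinfo() relies on the AMD-defined leaves: for 0x80000005,
 * ECX[31:24]/EDX[31:24] give the L1 data/instruction cache sizes in KB
 * and ECX[7:0]/EDX[7:0] the line sizes; for 0x80000006, ECX[31:16] is
 * the L2 size in KB and ECX[7:0] its line size.
 */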
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
               l2size, ecx & 0xFF);
}
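
/*
 * Hyper-Threading detection: CPUID leaf 1 reports the number of logical
 * processors per physical package in EBX[23:16].  The physical package ID
 * is the initial APIC ID with the low log2(siblings) bits shifted away
 * (get_count_order() computes that shift), and the core ID is recovered
 * from the APIC ID bits between the thread and core fields.
 */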
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {
                if (smp_num_siblings > nr_cpu_ids) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
                               smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
#ifdef CONFIG_X86_64
                c->phys_proc_id = phys_pkg_id(index_msb);
#else
                c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
#endif

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

#ifdef CONFIG_X86_64
                c->cpu_core_id = phys_pkg_id(index_msb) &
                        ((1 << core_bits) - 1);
#else
                c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
                        ((1 << core_bits) - 1);
#endif
        }

out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;
        static int printed;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;
                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        if (!printed) {
                printed++;
                printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}
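
/*
 * CPUID leaf 1 packs the processor signature into EAX: bits 3:0 are the
 * stepping, 7:4 the model, 11:8 the family, 19:16 the extended model and
 * 27:20 the extended family.  The extended family only contributes for
 * family 0xf, and the extended model for family 0x6 and up, which is
 * what the arithmetic below implements.
 */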
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;
                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}
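
/*
 * x86_capability[] is indexed by feature word: word 0 is CPUID.1 EDX,
 * word 4 is CPUID.1 ECX, and words 1 and 6 are the EDX/ECX of the
 * AMD-defined leaf 0x80000001 -- the layout get_cpu_cap() fills in below.
 */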
static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;
                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
        }

#ifdef CONFIG_X86_64
        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
#endif

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);
}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        int i;

        /*
         * First of all, decide if this is a 486 or higher
         * It's a 486 if we can modify the AC flag
         */
        if (flag_is_changeable_p(X86_EFLAGS_AC))
                c->x86 = 4;
        else
                c->x86 = 3;

        for (i = 0; i < X86_VENDOR_NUM; i++)
                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
                        c->x86_vendor_id[0] = 0;
                        cpu_devs[i]->c_identify(c);
                        if (c->x86_vendor_id[0]) {
                                get_cpu_vendor(c);
                                break;
                        }
                }
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP. Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
#else
        c->x86_clflush_size = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* Cyrix could have CPUID enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);

        validate_pat_support(c);

#ifdef CONFIG_SMP
        c->cpu_index = boot_cpu_id;
#endif
}
void __init early_cpu_init(void)
{
        struct cpu_dev **cdev;
        int count = 0;

        printk("KERNEL supported cpus:\n");
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                struct cpu_dev *cpudev = *cdev;
                unsigned int j;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

                for (j = 0; j < 2; j++) {
                        if (!cpudev->c_ident[j])
                                continue;
                        printk(" %s %s\n", cpudev->c_vendor,
                               cpudev->c_ident[j]);
                }
        }

        early_identify_cpu(&boot_cpu_data);
}
/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect. In the latter case it doesn't even *fail*
 * reliably, so probing for it doesn't even work. Disable it completely
 * unless we can find a reliable way to detect all the broken cases.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
        clear_cpu_cap(c, X86_FEATURE_NOPL);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* Cyrix could have CPUID enabled via c_identify() */
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
                c->apicid = phys_pkg_id(c->initial_apicid, 0);
# else
                c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
                c->phys_proc_id = c->initial_apicid;
#endif
        }

        get_model_name(c); /* Default name */

        init_scattered_cpuid_features(c);
        detect_nopl(c);
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
#else
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_clflush_size = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

        if (this_cpu->c_identify)
                this_cpu->c_identify(c);

#ifdef CONFIG_X86_64
        c->apicid = phys_pkg_id(0);
#endif

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features. Now
         * we do "generic changes."
         */

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

#ifdef CONFIG_X86_64
        detect_ht(c);
#endif

        init_hypervisor(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        /* Init Machine Check Exception if available. */
        mcheck_init(c);
#endif

        select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
        numa_add_cpu(smp_processor_id());
#endif
}
#ifdef CONFIG_X86_64
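/*
 * The vgetcpu() vsyscall needs a cheap way to read the current CPU number
 * from user space.  With RDTSCP it comes from the IA32_TSC_AUX MSR;
 * otherwise the kernel encodes the CPU (and node) number in the limit of
 * a per-CPU GDT segment and user space recovers it with LSL.
 */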
static void vgetcpu_set_mode(void)
{
        if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
                vgetcpu_mode = VGETCPU_RDTSCP;
        else
                vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
#else
        vgetcpu_set_mode();
#endif
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
#ifdef CONFIG_X86_32
        enable_sep_cpu();
#endif
        mtrr_ap_init();
}
struct msr_range {
        unsigned min;
        unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
        { 0x00000000, 0x00000418},
        { 0xc0000000, 0xc000040b},
        { 0xc0010000, 0xc0010142},
        { 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
        unsigned index;
        u64 val;
        int i;
        unsigned index_min, index_max;

        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;
                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_amd_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
        }
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
        int num;

        get_option(&arg, &num);

        if (num > 0)
                show_msr = num;
        return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM)
                vendor = this_cpu->c_vendor;
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;

        if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);

        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);
        else
                printk(KERN_CONT "%d86", c->x86);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
        if (c->cpu_index < show_msr)
                print_cpu_msr();
#else
        if (show_msr)
                print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;

        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
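
/*
 * irq_stack_union must be the first per-CPU object: the %gs base points
 * at it, and (with CC_STACKPROTECTOR) the stack canary is expected at a
 * fixed offset inside it, so boot and per-CPU setup depend on this
 * placement.
 */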
DEFINE_PER_CPU_FIRST(union irq_stack_union,
                     irq_stack_union) __aligned(PAGE_SIZE);

#ifdef CONFIG_SMP
DEFINE_PER_CPU(char *, irq_stack_ptr);  /* will be set during per cpu init */
#else
DEFINE_PER_CPU(char *, irq_stack_ptr) =
        per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
#endif

DEFINE_PER_CPU(unsigned long, kernel_stack) =
        (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
        __aligned(PAGE_SIZE);

extern asmlinkage void ignore_sysret(void);
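
/*
 * MSR_STAR layout: bits 47:32 hold the kernel CS selector that SYSCALL
 * loads (SS follows at +8), and bits 63:48 the selector base that SYSRET
 * uses (CS at +0 for a 32-bit return, SS at +8, CS at +16 for a 64-bit
 * return).  Writing __USER32_CS into 63:48 therefore makes both 32-bit
 * and 64-bit SYSRET land on the right user segments.
 */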
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a bit strange symbiosis.
         * They both write to the same internal register. STAR allows us to
         * set CS/DS but only for a 32-bit target. LSTAR sets the 64-bit rip.
         */
        wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
        wrmsrl(MSR_LSTAR, system_call);
        wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
        return regs;
}
#endif

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64
void __cpuinit cpu_init(void)
{
        int cpu = stack_smp_processor_id();
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
        unsigned long v;
        struct task_struct *me;
        int i;

        loadsegment(fs, 0);
        loadsegment(gs, 0);
        load_gs_base(cpu);

#ifdef CONFIG_NUMA
        if (cpu != 0 && percpu_read(node_number) == 0 &&
            cpu_to_node(cpu) != NUMA_NO_NODE)
                percpu_write(node_number, cpu_to_node(cpu));
#endif

        me = current;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                panic("CPU#%d already initialized!\n", cpu);

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        switch_to_new_gdt();
        load_idt((const struct desc_ptr *)&idt_descr);

        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        check_efer();
        if (cpu != 0 && x2apic)
                enable_x2apic();

        /*
         * set up and load the per-CPU TSS
         */
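        /*
         * Each slot of tss.ist[] points at a known-good stack the CPU
         * switches to when the corresponding IDT entry requests an
         * Interrupt Stack Table entry (double fault, NMI, ...).  All
         * stacks are EXCEPTION_STKSZ bytes except DEBUG_STACK, which is
         * DEBUG_STKSZ; they are carved consecutively out of the per-CPU
         * exception_stacks area defined above.
         */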
        if (!orig_ist->ist[0]) {
                static const unsigned int sizes[N_EXCEPTION_STACKS] = {
                        [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
                        [DEBUG_STACK - 1] = DEBUG_STKSZ
                };
                char *estacks = per_cpu(exception_stacks, cpu);
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += sizes[v];
                        orig_ist->ist[v] = t->x86_tss.ist[v] =
                                (unsigned long)estacks;
                }
        }

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;

        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        if (me->mm)
                BUG();
        enter_lazy_tlb(&init_mm, me);

        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
        /*
         * If the kgdb is connected no debug regs should be altered. This
         * is only applicable when KGDB and a KGDB I/O module are built
         * into the kernel and you are using early debugging with
         * kgdbwait. KGDB will control the kernel HW breakpoint registers.
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
        else {
#endif
        /*
         * Clear all 6 debug registers:
         */
        set_debugreg(0UL, 0);
        set_debugreg(0UL, 1);
        set_debugreg(0UL, 2);
        set_debugreg(0UL, 3);
        set_debugreg(0UL, 6);
        set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
        /* If the kgdb is connected no debug regs should be altered. */
        }
#endif

        fpu_init();

        raw_local_save_flags(kernel_eflags);

        if (is_uv_system())
                uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_idt(&idt_descr);
        switch_to_new_gdt();

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        if (curr->mm)
                BUG();
        enter_lazy_tlb(&init_mm, curr);

        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        /* Clear %gs. */
        asm volatile ("mov %0, %%gs" : : "r" (0));

        /* Clear all 6 debug registers: */
        set_debugreg(0, 0);
        set_debugreg(0, 1);
        set_debugreg(0, 2);
        set_debugreg(0, 3);
        set_debugreg(0, 6);
        set_debugreg(0, 7);

        /*
         * Force FPU initialization:
         */
        if (cpu_has_xsave)
                current_thread_info()->status = TS_XSAVE;
        else
                current_thread_info()->status = 0;
        clear_used_math();
        mxcsr_feature_mask_init();

        /*
         * The boot processor sets up the FP and extended state context info.
         */
        if (smp_processor_id() == boot_cpu_id)
                init_thread_xstate();

        xsave_init();
}
#endif