common.c

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#endif
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>

#include "cpu.h"

static struct cpu_dev *this_cpu __cpuinitdata;

#ifdef CONFIG_X86_64
/*
 * We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */
/*
 * The TLS descriptors are currently at a different place compared to
 * i386.  Hopefully nobody expects them at a fixed place (Wine?)
 */
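/*
 * Each entry below packs a segment descriptor as two 32-bit words:
 * the low word holds limit[15:0] and base[15:0]; the high word holds
 * base[23:16], the access byte (P/DPL/S/type), limit[19:16] together
 * with the G/D/L/AVL flags, and base[31:24].  For example 0x00cf9b00
 * is a present, DPL-0, execute/read code segment with 4k granularity
 * and a 4GB limit.
 */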
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
#else
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
#endif

EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name itself; in particular, it isn't used when CPUID levels
 * 0x80000002..4 are supported.
 */
/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

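/*
 * The probe below works by saving EFLAGS, toggling the requested bit,
 * writing the result back, and reading EFLAGS again: if the bit kept
 * its new value the CPU implements it, while a CPU that hard-wires
 * the bit throws the change away.  The ID bit (bit 21) is
 * architecturally defined to be toggleable exactly when the CPUID
 * instruction exists, which is what have_cpuid_p() relies on.
 */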
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"		/* save the original EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = current EFLAGS */
	    "movl %0,%1\n\t"		/* f2 = f1 */
	    "xorl %2,%0\n\t"		/* toggle the bit under test */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* try to write it back... */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* ...and re-read EFLAGS */
	    "popfl\n\t"			/* restore the original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif

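/*
 * Feature bits requested off on the command line ("clearcpuid=",
 * "nofxsr", "noclflush", ...) accumulate here via
 * setup_clear_cpu_cap() and are masked out of every CPU's capability
 * words at the end of identify_cpu().
 */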
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
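/*
 * Note that lgdt itself does not reload any segment register; %fs has
 * to be rewritten explicitly before %fs-relative per-cpu accesses see
 * the new descriptor.
 */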
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
#ifdef CONFIG_X86_32
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
#endif
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	display_cacheinfo(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

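/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * 48-character processor brand string in EAX..EDX, so the three
 * cpuid() calls below fill x86_model_id directly before it is
 * NUL-terminated at byte 48.
 */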
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
	return 1;
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
			l2size, ecx & 0xFF);
}

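/*
 * CPUID leaf 1 reports the number of logical processors per physical
 * package in EBX[23:16].  The initial APIC ID is carved into fields:
 * shifting it right by log2(siblings) yields the package ID, and the
 * bits below that, less the thread bits, select the core within the
 * package; that is what the index_msb/core_bits arithmetic below
 * recovers.
 */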
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n",
					smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
#ifdef CONFIG_X86_64
		c->phys_proc_id = phys_pkg_id(index_msb);
#else
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
#endif

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

#ifdef CONFIG_X86_64
		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
#else
		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					       ((1 << core_bits) - 1);
#endif
	}

out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;
		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

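/*
 * CPUID leaf 1 returns the processor signature in EAX: stepping in
 * bits 3:0, model in 7:4, family in 11:8, extended model in 19:16 and
 * extended family in 27:20.  The extended family only augments family
 * 0xf, and the extended model only applies from family 6 up, which is
 * what the arithmetic in cpu_detect() below implements.
 */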
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	c->x86_clflush_size = 32;
	c->x86_cache_alignment = c->x86_clflush_size;

	if (!have_cpuid_p())
		return;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk("  %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect.  Hence, probe for it based on first
 * principles.
 */
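/*
 * The probe executes the raw NOPL opcode (0f 1f c0) under an
 * exception-table entry: on CPUs that raise #UD, the fixup at label 3
 * zeroes the register and resumes at label 2, so has_nopl no longer
 * matches the signature and the feature stays cleared.
 */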
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	const u32 nopl_signature = 0x888c53b1; /* Random number */
	u32 has_nopl = nopl_signature;

	clear_cpu_cap(c, X86_FEATURE_NOPL);
	if (c->x86 >= 6) {
		asm volatile("\n"
			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
			     "2:\n"
			     "	.section .fixup,\"ax\"\n"
			     "3:	xor %0,%0\n"
			     "	jmp 2b\n"
			     "	.previous\n"
			     _ASM_EXTABLE(1b,3b)
			     : "+a" (has_nopl));

		if (has_nopl == nopl_signature)
			set_cpu_cap(c, X86_FEATURE_NOPL);
	}
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	if (!have_cpuid_p())
		return;

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_HT
		c->apicid = phys_pkg_id(c->initial_apicid, 0);
		c->phys_proc_id = c->initial_apicid;
#else
		c->apicid = c->initial_apicid;
#endif
	}

	if (c->extended_cpuid_level >= 0x80000004)
		get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/*
		 * First of all, decide if this is a 486 or higher.
		 * It's a 486 if we can modify the AC flag.
		 */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	select_idle_routine(c);
}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	sysenter_setup();
	enable_sep_cpu();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	enable_sep_cpu();
	mtrr_ap_init();
}

struct msr_range {
	unsigned min;
	unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418 },
	{ 0xc0000000, 0xc000040b },
	{ 0xc0010000, 0xc0010142 },
	{ 0xc0011000, 0xc001103b },
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

#ifdef CONFIG_X86_64
struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/*
 * noexec=on|off
 * Control non-executable mappings for 64-bit processes.
 *
 * on	Enable (default)
 * off	Disable
 */
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

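/*
 * The PDA (per-CPU data area) is addressed through %gs: writing the
 * PDA's address into MSR_GS_BASE makes every %gs-relative access on
 * this CPU hit its own pda.  %gs is cleared first so no stale
 * descriptor-based base lingers, and the barriers keep PDA accesses
 * from being reordered around the switch.
 */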
void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	loadsegment(fs, 0);
	loadsegment(gs, 0);
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack = (unsigned long)stack_thread_info() -
				 PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
		pda->irqstackptr += IRQSTACKSIZE - 64;
	} else {
		if (!pda->irqstackptr) {
			pda->irqstackptr = (char *)
				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
			if (!pda->irqstackptr)
				panic("cannot allocate irqstack for cpu %d",
				      cpu);
			pda->irqstackptr += IRQSTACKSIZE - 64;
		}

		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
			pda->nodenumber = cpu_to_node(cpu);
	}
}

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
			   DEBUG_STKSZ] __page_aligned_bss;

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis.
	 * They both write to the same internal register.  STAR lets us
	 * set CS/DS, but only for a 32-bit target; LSTAR sets the
	 * 64-bit rip.
	 */
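	/*
	 * STAR[47:32] supplies the kernel CS (with SS implied at +8)
	 * loaded on syscall; STAR[63:48] is the selector base from
	 * which sysret builds the user CS/SS (32-bit CS at the base,
	 * SS at base+8, 64-bit CS at base+16), which is why
	 * __USER32_CS goes into the top word below.
	 */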
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx)
		__supported_pte_mask &= ~_PAGE_NX;
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	return regs;
}
#endif

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in pda_init() for 64-bit.
 */
#ifdef CONFIG_X86_64
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	switch_to_new_gdt();
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();
	if (cpu != 0 && x2apic)
		enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
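	/*
	 * The 64-bit TSS holds up to seven IST pointers; an IDT entry
	 * that selects one forces the CPU onto that known-good stack
	 * on delivery, keeping stack-sensitive exceptions such as
	 * double fault, NMI and debug off a possibly bad kernel stack.
	 */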
	if (!orig_ist->ist[0]) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			if (cpu) {
				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
				if (!estacks)
					panic("Cannot allocate exception "
					      "stack %lu %d\n", v, cpu);
			}
			estacks += PAGE_SIZE << order[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered.  This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait.  KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else {
#endif
	/*
	 * Clear all 6 debug registers:
	 */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
	/* If the kgdb is connected no debug regs should be altered. */
	}
#endif

	fpu_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	if (cpu_has_xsave)
		current_thread_info()->status = TS_XSAVE;
	else
		current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();

	/*
	 * Boot processor to setup the FP and extended state context info.
	 */
	if (!smp_processor_id())
		init_thread_xstate();

	xsave_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif
#endif