addon_cpuid_features.c

/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <mach_apic.h>

struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
};

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;

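	/*
	 * Table of scattered feature bits: each entry names the feature
	 * flag to set, the cpuid output register and bit position that
	 * report it, and the leaf to query.  The single entry below, for
	 * example, says X86_FEATURE_IDA (Intel Dynamic Acceleration) is
	 * reported in bit 1 of EAX for leaf 0x00000006.
	 */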
	static const struct cpuid_bit cpuid_bits[] = {
		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
		{ 0, 0, 0, 0 }
	};

	for (cb = cpuid_bits; cb->feature; cb++) {

		/*
		 * Verify that the level is valid: cpuid_eax() on the base
		 * of the range (0x00000000 or 0x80000000) returns the
		 * highest leaf implemented in that range, so skip entries
		 * whose leaf this CPU does not support.
		 */
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;

		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
		      &regs[CR_ECX], &regs[CR_EDX]);

		if (regs[cb->reg] & (1 << cb->bit))
			set_cpu_cap(c, cb->feature);
	}
}

/* leaf 0xb SMT level */
#define SMT_LEVEL	0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE	0
#define SMT_TYPE	1
#define CORE_TYPE	2

#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)

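/*
 * Example of how these masks decode leaf 0xb (values are illustrative,
 * for a hypothetical part with two SMT threads per core): sub-leaf 0
 * would report LEAFB_SUBTYPE(ecx) == SMT_TYPE, LEVEL_MAX_SIBLINGS(ebx)
 * == 2 (logical processors sharing a core) and BITS_SHIFT_NEXT_LEVEL(eax)
 * == 1 (shift the x2APIC ID right by one bit to strip the SMT ID),
 * while edx holds the full 32-bit x2APIC ID.
 */
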
/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;

	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * check if the cpuid leaf 0xb is actually implemented.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
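	/*
	 * With the illustrative two-threads-per-core values above this
	 * gives smp_num_siblings == 2 and ht_mask_width == 1: the lowest
	 * bit of the x2APIC ID selects the thread within a core.
	 */
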
	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
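	/*
	 * Illustrative arithmetic (hypothetical widths): with
	 * core_plus_mask_width == 4 and ht_mask_width == 1,
	 * core_select_mask == (~(-1 << 4)) >> 1 == 0xf >> 1 == 0x7, i.e.
	 * after the SMT bit is shifted away, the next three APIC ID bits
	 * select the core within the package.
	 */
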
#ifdef CONFIG_X86_32
	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
						& core_select_mask;
	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = phys_pkg_id(c->initial_apicid, 0);
#else
	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = phys_pkg_id(0);
#endif
	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
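	/*
	 * core_level_siblings counts logical processors per package and
	 * smp_num_siblings counts them per core, so the ratio is the
	 * number of cores per package (e.g. 8 / 2 == 4 on the
	 * hypothetical part used in the examples above).
	 */
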
	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
	       c->phys_proc_id);
	if (c->x86_max_cores > 1)
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	return;
#endif
}

#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
	if (!cpu_has_pat)
		pat_disable("PAT not supported by CPU.");

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * There is a known erratum on Pentium III and Core Solo
		 * and Core Duo CPUs.
		 * " Page with PAT set to WC while associated MTRR is UC
		 * may consolidate to UC "
		 * Because of this erratum, it is better to stick with
		 * setting WC in MTRR rather than using PAT on these CPUs.
		 *
		 * Enable PAT WC only on P4, Core 2 or later CPUs.
		 */
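		/*
		 * Family 0xf covers the Pentium 4 line; family 6, model
		 * 15 (0x0f) corresponds to the first Core 2 parts, so the
		 * check below allows PAT only from those generations on.
		 */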
		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
			return;

		pat_disable("PAT WC disabled due to known CPU erratum.");
		return;

	case X86_VENDOR_AMD:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_TRANSMETA:
		return;
	}

	pat_disable("PAT disabled. Not yet verified on this CPU type.");
}
#endif