/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 *   This file is part of the Linux kernel, and is made available under
 *   the terms of the GNU General Public License version 2.
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present.  This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */
  21. #ifdef _SETUP
  22. # include "boot.h"
  23. # include "bitops.h"
  24. #endif
  25. #include <linux/types.h>
  26. #include <asm/cpufeature.h>
  27. #include <asm/processor-flags.h>
  28. #include <asm/required-features.h>
  29. #include <asm/msr-index.h>
/* CPU description filled in by get_flags()/check_cpu(). */
struct cpu_features cpu;
/* CPUID vendor string, as the three little-endian words EBX/EDX/ECX
   returned by CPUID leaf 0. */
static u32 cpu_vendor[3];
/* Per-word mask of required feature bits this CPU is missing. */
static u32 err_flags[NCAPINTS];

/* Minimum CPU family this kernel was configured for. */
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

/* Feature bits this kernel build cannot run without, one mask per
   CPUID feature word. */
static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	REQUIRED_MASK2,
	REQUIRED_MASK3,
	REQUIRED_MASK4,
	REQUIRED_MASK5,
	REQUIRED_MASK6,
	REQUIRED_MASK7,
};

/* Pack four characters into a little-endian u32, matching the byte
   layout CPUID uses for the vendor string. */
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
  46. static int is_amd(void)
  47. {
  48. return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
  49. cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
  50. cpu_vendor[2] == A32('c', 'A', 'M', 'D');
  51. }
  52. static int is_centaur(void)
  53. {
  54. return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
  55. cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
  56. cpu_vendor[2] == A32('a', 'u', 'l', 's');
  57. }
  58. static int is_transmeta(void)
  59. {
  60. return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
  61. cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
  62. cpu_vendor[2] == A32('M', 'x', '8', '6');
  63. }
/*
 * Probe for a working x87 FPU.
 *
 * Executes fninit/fnstsw/fnstcw and checks that the status word is 0
 * and the low control-word bits read back as 0x003f — the values an
 * x87 produces after fninit.  fcw/fsw are preset to -1 so that if the
 * stores never happen the check fails.
 *
 * If CR0.EM or CR0.TS is set the FPU instructions would trap, so those
 * bits are cleared first (and deliberately left cleared).  This is why
 * the code is unsafe after the kernel proper has set up the FPU (see
 * the note in the file header).
 */
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	u32 cr0;
	asm("movl %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("movl %0,%%cr0" : : "r" (cr0));
	}
	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));
	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
/*
 * Test whether the EFLAGS bits in @mask are toggleable on this CPU.
 *
 * Saves EFLAGS, pushes a copy with @mask XORed in, pops it into
 * EFLAGS, reads EFLAGS back, then restores the original value.
 * Returns nonzero if any masked bit actually changed.
 *
 * Callers use this with X86_EFLAGS_ID (CPUID available) and
 * X86_EFLAGS_AC (386 vs. 486 distinction).
 */
static int has_eflag(u32 mask)
{
	u32 f0, f1;
	asm("pushfl ; "		/* save original EFLAGS */
	    "pushfl ; "
	    "popl %0 ; "	/* f0 = current EFLAGS */
	    "movl %0,%1 ; "
	    "xorl %2,%1 ; "	/* f1 = f0 ^ mask */
	    "pushl %1 ; "
	    "popfl ; "		/* try to write the toggled value */
	    "pushfl ; "
	    "popl %1 ; "	/* f1 = EFLAGS after the write */
	    "popfl"		/* restore original EFLAGS */
	    : "=&r" (f0), "=&r" (f1)
	    : "ri" (mask));
	return !!((f0^f1) & mask);
}
/*
 * Populate "cpu" (level, model, feature words) and cpu_vendor[] via
 * the FPU probe and CPUID, when CPUID is available.
 *
 * - X86_FEATURE_FPU is set from the has_fpu() probe.
 * - If EFLAGS.ID toggles (CPUID present): leaf 0 gives the vendor
 *   string; leaf 1 gives family/model plus feature words 0 (EDX) and
 *   4 (ECX); leaf 0x80000001 gives extended feature words 1 (EDX) and
 *   6 (ECX).  Each leaf is used only if the reported maximum leaf is
 *   within a sane range.
 */
static void get_flags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;
	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);
	if (has_eflag(X86_EFLAGS_ID)) {
		asm("cpuid"
		    : "=a" (max_intel_level),
		      "=b" (cpu_vendor[0]),
		      "=d" (cpu_vendor[1]),
		      "=c" (cpu_vendor[2])
		    : "a" (0));
		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			asm("cpuid"
			    : "=a" (tfms),
			      "=c" (cpu.flags[4]),
			      "=d" (cpu.flags[0])
			    : "a" (0x00000001)
			    : "ebx");
			cpu.level = (tfms >> 8) & 15;	/* family */
			cpu.model = (tfms >> 4) & 15;	/* model */
			if (cpu.level >= 6)
				/* extended model bits, family 6+ */
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}
		asm("cpuid"
		    : "=a" (max_amd_level)
		    : "a" (0x80000000)
		    : "ebx", "ecx", "edx");
		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			u32 eax = 0x80000001;
			asm("cpuid"
			    : "+a" (eax),
			      "=c" (cpu.flags[6]),
			      "=d" (cpu.flags[1])
			    : : "ebx");
		}
	}
}
  135. /* Returns a bitmask of which words we have error bits in */
  136. static int check_flags(void)
  137. {
  138. u32 err;
  139. int i;
  140. err = 0;
  141. for (i = 0; i < NCAPINTS; i++) {
  142. err_flags[i] = req_flags[i] & ~cpu.flags[i];
  143. if (err_flags[i])
  144. err |= 1 << i;
  145. }
  146. return err;
  147. }
/*
 * Verify the CPU meets this kernel's minimum family and feature
 * requirements.  Returns -1 on error, 0 if the CPU is acceptable.
 *
 * *cpu_level_ptr is set to the current CPU level; *req_level_ptr to the
 * required level.  x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags
 * missing, NULL otherwise.
 *
 * If the only missing bits are in feature word 0 (err == 0x01), three
 * vendor-specific workarounds are tried before giving up: enabling
 * SSE/SSE2 on AMD, enabling CX8 on VIA C3, and unmasking feature bits
 * on Transmeta.
 */
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;
	memset(&cpu.flags, 0, sizeof cpu.flags);
	cpu.level = 3;			/* assume 386 ... */
	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;		/* ... 486 if EFLAGS.AC toggles */
	get_flags();
	err = check_flags();
	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;		/* long mode: treat as level 64 */
	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */
		u32 ecx = MSR_K7_HWCR;
		u32 eax, edx;
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		/* clear HWCR bit 15 — presumably the SSE-disable bit;
		   TODO confirm against the AMD BKDG */
		eax &= ~(1 << 15);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
		get_flags();	/* Make sure it really did something */
		err = check_flags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */
		u32 ecx = MSR_VIA_FCR;
		u32 eax, edx;
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		/* set FCR bits 1 and 7 to enable CX8 — semantics per
		   VIA docs; NOTE(review): CPUID is not re-read here,
		   the bit is set by hand below */
		eax |= (1<<1)|(1<<7);
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_flags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */
		u32 ecx = 0x80860004;	/* Transmeta feature-mask MSR,
					   presumably — writing ~0 unmasks */
		u32 eax, edx;
		u32 level = 1;
		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
		/* re-read CPUID leaf 1 feature word 0 with the mask off */
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		/* restore the original mask */
		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
		err = check_flags();
	}
	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;
	return (cpu.level < req_level || err) ? -1 : 0;
}