/* intel_64.c: Intel-specific CPU setup for x86-64 */
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/topology.h>
#include <asm/numa_64.h>
#include "cpu.h"
  8. static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
  9. {
  10. if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
  11. (c->x86 == 0x6 && c->x86_model >= 0x0e))
  12. set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
  13. }
  14. /*
  15. * find out the number of processor cores on the die
  16. */
  17. static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
  18. {
  19. unsigned int eax, t;
  20. if (c->cpuid_level < 4)
  21. return 1;
  22. cpuid_count(4, 0, &eax, &t, &t, &t);
  23. if (eax & 0x1f)
  24. return ((eax >> 26) + 1);
  25. else
  26. return 1;
  27. }
  28. static void __cpuinit srat_detect_node(void)
  29. {
  30. #ifdef CONFIG_NUMA
  31. unsigned node;
  32. int cpu = smp_processor_id();
  33. int apicid = hard_smp_processor_id();
  34. /* Don't do the funky fallback heuristics the AMD version employs
  35. for now. */
  36. node = apicid_to_node[apicid];
  37. if (node == NUMA_NO_NODE || !node_online(node))
  38. node = first_node(node_online_map);
  39. numa_set_node(cpu, node);
  40. printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
  41. #endif
  42. }
  43. static void __cpuinit init_intel(struct cpuinfo_x86 *c)
  44. {
  45. /* Cache sizes */
  46. unsigned n;
  47. init_intel_cacheinfo(c);
  48. if (c->cpuid_level > 9) {
  49. unsigned eax = cpuid_eax(10);
  50. /* Check for version and the number of counters */
  51. if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
  52. set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
  53. }
  54. if (cpu_has_ds) {
  55. unsigned int l1, l2;
  56. rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
  57. if (!(l1 & (1<<11)))
  58. set_cpu_cap(c, X86_FEATURE_BTS);
  59. if (!(l1 & (1<<12)))
  60. set_cpu_cap(c, X86_FEATURE_PEBS);
  61. }
  62. if (cpu_has_bts)
  63. ds_init_intel(c);
  64. n = c->extended_cpuid_level;
  65. if (n >= 0x80000008) {
  66. unsigned eax = cpuid_eax(0x80000008);
  67. c->x86_virt_bits = (eax >> 8) & 0xff;
  68. c->x86_phys_bits = eax & 0xff;
  69. }
  70. if (c->x86 == 15)
  71. c->x86_cache_alignment = c->x86_clflush_size * 2;
  72. if (c->x86 == 6)
  73. set_cpu_cap(c, X86_FEATURE_REP_GOOD);
  74. set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
  75. c->x86_max_cores = intel_num_cpu_cores(c);
  76. srat_detect_node();
  77. }
/*
 * Vendor descriptor for Intel CPUs; the common cpu code matches
 * c_ident against the CPUID vendor string and invokes the early
 * and main init hooks above.
 */
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor = "Intel",
	.c_ident = { "GenuineIntel" },
	.c_early_init = early_init_intel,
	.c_init = init_intel,
};

/* Register this descriptor in the generic vendor dispatch table. */
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);