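/*
 * intel_64.c - Intel-specific CPU setup for x86-64: early feature
 * detection, core topology enumeration and NUMA node assignment.
 */
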
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/topology.h>
#include <asm/numa_64.h>

#include "cpu.h"
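
/*
 * Early setup, run before the generic identification pass.  The TSC
 * ticks at a constant rate on P4s from model 3 and on family 6 CPUs
 * from model 0xe (Core) onwards; SYSENTER also works from 32-bit
 * compat mode on 64-bit Intel CPUs.
 */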
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
}

/*
 * Find out the number of processor cores on the die via CPUID leaf 4
 * (deterministic cache parameters).
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &t, &t, &t);

	/*
	 * EAX[4:0] is the cache type; zero means the leaf is invalid.
	 * EAX[31:26] encodes the maximum number of cores per package,
	 * minus one.
	 */
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
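
/*
 * Assign this CPU to a NUMA node using the APIC-ID-to-node table
 * built from the ACPI SRAT, falling back to the first online node
 * when the table has no usable entry.
 */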
static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
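
/*
 * Main Intel-specific setup: cache descriptors, architectural
 * perfmon, the BTS/PEBS debug store, cache alignment quirks, and
 * topology/NUMA detection.
 */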
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/* MISC_ENABLE bit 11: BTS unavailable, bit 12: PEBS unavailable */
		if (!(l1 & (1 << 11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1 << 12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (cpu_has_bts)
		ds_init_intel(c);

	/* P4 (family 15): effective cache line for alignment purposes is
	   twice the CLFLUSH size */
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	/* REP string instructions are fast on family 6 */
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	/* LFENCE synchronizes RDTSC on Intel */
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	/* Prefer the extended topology leaf (CPUID 0xb); fall back to
	   counting cores via leaf 4 when it is not available. */
	detect_extended_topology(c);
	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY))
		c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
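
/*
 * Vendor hooks picked up by the generic CPU identification code when
 * the CPUID vendor string reads "GenuineIntel".
 */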
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);