@@ -15,6 +15,11 @@
 #include <asm/ds.h>
 #include <asm/bugs.h>
 
+#ifdef CONFIG_X86_64
+#include <asm/topology.h>
+#include <asm/numa_64.h>
+#endif
+
 #include "cpu.h"
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -25,14 +30,20 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
-	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
-	if (c->x86 == 15 && c->x86_cache_alignment == 64)
-		c->x86_cache_alignment = 128;
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#else
+	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+	if (c->x86 == 15 && c->x86_cache_alignment == 64)
+		c->x86_cache_alignment = 128;
+#endif
 }
 
+#ifdef CONFIG_X86_32
 /*
  * Early probe support logic for ppro memory erratum #50
  *
@@ -73,6 +84,40 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 }
 
 
+
+#ifdef CONFIG_X86_F00F_BUG
+static void __cpuinit trap_init_f00f_bug(void)
+{
+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+
+	/*
+	 * Update the IDT descriptor and reload the IDT so that
+	 * it uses the read-only mapped virtual address.
+	 */
+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+	load_idt(&idt_descr);
+}
+#endif
+#endif
+
+static void __cpuinit srat_detect_node(void)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+	unsigned node;
+	int cpu = smp_processor_id();
+	int apicid = hard_smp_processor_id();
+
+	/* Don't do the funky fallback heuristics the AMD version employs
+	   for now. */
+	node = apicid_to_node[apicid];
+	if (node == NUMA_NO_NODE || !node_online(node))
+		node = first_node(node_online_map);
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
+
 /*
  * find out the number of processor cores on the die
  */
@@ -91,20 +136,6 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-#ifdef CONFIG_X86_F00F_BUG
-static void __cpuinit trap_init_f00f_bug(void)
-{
-	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
-	/*
-	 * Update the IDT descriptor and reload the IDT so that
-	 * it uses the read-only mapped virtual address.
-	 */
-	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-	load_idt(&idt_descr);
-}
-#endif
-
 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
@@ -139,6 +170,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
+#ifdef CONFIG_X86_32
 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
 		clear_cpu_cap(c, X86_FEATURE_SEP);
@@ -176,18 +208,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	if (p)
 		strcpy(c->x86_model_id, p);
 
-	detect_extended_topology(c);
-
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-		detect_ht(c);
-	}
-
-	/* Work around errata */
 	Intel_errata_workarounds(c);
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
@@ -206,14 +226,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		movsl_mask.mask = 7;
 		break;
 	}
+#endif
+
 #endif
 
 	if (cpu_has_xmm2)
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	if (c->x86 == 15)
-		set_cpu_cap(c, X86_FEATURE_P4);
-	if (c->x86 == 6)
-		set_cpu_cap(c, X86_FEATURE_P3);
 	if (cpu_has_ds) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
@@ -224,6 +242,17 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		ds_init_intel(c);
 	}
 
+#ifdef CONFIG_X86_64
+	if (c->x86 == 15)
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
+	if (c->x86 == 15)
+		set_cpu_cap(c, X86_FEATURE_P4);
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_P3);
+
 	if (cpu_has_bts)
 		ptrace_bts_init_intel(c);
 
@@ -240,8 +269,25 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
 	numaq_tsc_disable();
 #endif
+#endif
+
+	detect_extended_topology(c);
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
+	/* Work around errata */
+	srat_detect_node();
 }
 
+#ifdef CONFIG_X86_32
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/*
@@ -254,10 +300,12 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 		size = 256;
 	return size;
 }
+#endif
 
 static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
 		  {
@@ -307,13 +355,12 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 		}
 		},
 	},
+	.c_size_cache	= intel_size_cache,
+#endif
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
-	.c_size_cache	= intel_size_cache,
 	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
 cpu_dev_register(intel_cpu_dev);
 
-/* arch_initcall(intel_cpu_init); */
-