@@ -225,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 			}
 		}
 	}
-	if (c->cpuid_level > 1) {
+	/*
+	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+	 * trace cache
+	 */
+	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
 		/* supports eax=2 call */
 		int i, j, n;
 		int regs[4];
 		unsigned char *dp = (unsigned char *)regs;
+		int only_trace = 0;
+
+		if (num_cache_leaves != 0 && c->x86 == 15)
+			only_trace = 1;
 
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
@@ -251,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				while (cache_table[k].descriptor != 0)
 				{
 					if (cache_table[k].descriptor == des) {
+						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+							break;
 						switch (cache_table[k].cache_type) {
 						case LVL_1_INST:
 							l1i += cache_table[k].size;
@@ -276,43 +286,46 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				}
 			}
 		}
+	}
 
-		if (new_l1d)
-			l1d = new_l1d;
+	if (new_l1d)
+		l1d = new_l1d;
 
-		if (new_l1i)
-			l1i = new_l1i;
+	if (new_l1i)
+		l1i = new_l1i;
 
-		if (new_l2) {
-			l2 = new_l2;
+	if (new_l2) {
+		l2 = new_l2;
 #ifdef CONFIG_SMP
-			cpu_llc_id[cpu] = l2_id;
+		cpu_llc_id[cpu] = l2_id;
 #endif
-		}
+	}
 
-		if (new_l3) {
-			l3 = new_l3;
+	if (new_l3) {
+		l3 = new_l3;
 #ifdef CONFIG_SMP
-			cpu_llc_id[cpu] = l3_id;
+		cpu_llc_id[cpu] = l3_id;
 #endif
-		}
-
-		if ( trace )
-			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-		else if ( l1i )
-			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-		if ( l1d )
-			printk(", L1 D cache: %dK\n", l1d);
-		else
-			printk("\n");
-		if ( l2 )
-			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-		if ( l3 )
-			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
-		c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 	}
 
+	if (trace)
+		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
+	else if ( l1i )
+		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+
+	if (l1d)
+		printk(", L1 D cache: %dK\n", l1d);
+	else
+		printk("\n");
+
+	if (l2)
+		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+
+	if (l3)
+		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
+
 	return l2;
 }
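
For reference, the selection policy the hunks above implement can be read off
from the combination of the outer gate and the inner only_trace check. Below is
a minimal standalone userspace sketch of that policy (not part of the patch;
the helper name use_cpuid2_descriptor and the enum stand-ins for the kernel's
LVL_* constants are illustrative only):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's LVL_* cache_table types. */
enum cache_type { LVL_1_INST, LVL_1_DATA, LVL_2, LVL_3, LVL_TRACE };

/*
 * Decide whether a cpuid(2) descriptor of the given type is counted:
 * if cpuid(4) found no leaves, cpuid(2) is the only source of cache
 * info; on a P4 (family 15) with cpuid(4) leaves present, cpuid(2) is
 * consulted only for the trace cache, which cpuid(4) does not report;
 * otherwise cpuid(2) is skipped entirely.
 */
static int use_cpuid2_descriptor(int num_cache_leaves, int family,
				 enum cache_type type)
{
	if (num_cache_leaves == 0)
		return 1;			/* no cpuid(4): take everything */
	if (family == 15)
		return type == LVL_TRACE;	/* P4: trace cache only */
	return 0;				/* cpuid(4) covers the rest */
}

int main(void)
{
	/* P4 with cpuid(4) leaves: only the trace cache comes from cpuid(2) */
	printf("%d\n", use_cpuid2_descriptor(4, 15, LVL_TRACE));	/* 1 */
	printf("%d\n", use_cpuid2_descriptor(4, 15, LVL_2));		/* 0 */
	/* older CPU without cpuid(4): everything comes from cpuid(2) */
	printf("%d\n", use_cpuid2_descriptor(0, 6, LVL_2));		/* 1 */
	return 0;
}

The first two cases correspond to entering the cpuid(2) loop via the
(num_cache_leaves == 0 || c->x86 == 15) gate, with only_trace filtering the
P4 case down to LVL_TRACE entries via the added break.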