@@ -3,7 +3,7 @@
  *
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
- *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
  */
 
@@ -16,7 +16,7 @@
 #include <linux/pci.h>
 
 #include <asm/processor.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
 #include <asm/k8.h>
 
 #define LVL_1_INST	1
@@ -25,14 +25,15 @@
 #define LVL_3		4
 #define LVL_TRACE	5
 
-struct _cache_table
-{
+struct _cache_table {
 	unsigned char descriptor;
 	char cache_type;
 	short size;
 };
 
-/* all the cache descriptor types we care about (no TLB or trace cache entries) */
+/* All the cache descriptor types we care about (no TLB or
+   trace cache entries) */
+
 static const struct _cache_table __cpuinitconst cache_table[] =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 };
 
 
-enum _cache_type
-{
+enum _cache_type {
 	CACHE_TYPE_NULL = 0,
 	CACHE_TYPE_DATA = 1,
 	CACHE_TYPE_INST = 2,
@@ -170,31 +170,31 @@ unsigned short num_cache_leaves;
    Maybe later */
 union l1_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 8;
-		unsigned assoc : 8;
-		unsigned size_in_kb : 8;
+		unsigned line_size:8;
+		unsigned lines_per_tag:8;
+		unsigned assoc:8;
+		unsigned size_in_kb:8;
 	};
 	unsigned val;
 };
 
 union l2_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 4;
-		unsigned assoc : 4;
-		unsigned size_in_kb : 16;
+		unsigned line_size:8;
+		unsigned lines_per_tag:4;
+		unsigned assoc:4;
+		unsigned size_in_kb:16;
 	};
 	unsigned val;
 };
 
 union l3_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 4;
-		unsigned assoc : 4;
-		unsigned res : 2;
-		unsigned size_encoded : 14;
+		unsigned line_size:8;
+		unsigned lines_per_tag:4;
+		unsigned assoc:4;
+		unsigned res:2;
+		unsigned size_encoded:14;
 	};
 	unsigned val;
 };
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void)
 
 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+	/* Cache sizes */
+	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
-				switch(this_leaf.eax.split.level) {
-				    case 1:
+				switch (this_leaf.eax.split.level) {
+				case 1:
 					if (this_leaf.eax.split.type ==
 							CACHE_TYPE_DATA)
 						new_l1d = this_leaf.size/1024;
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 							CACHE_TYPE_INST)
 						new_l1i = this_leaf.size/1024;
 					break;
-				    case 2:
+				case 2:
 					new_l2 = this_leaf.size/1024;
 					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
 					index_msb = get_count_order(num_threads_sharing);
 					l2_id = c->apicid >> index_msb;
 					break;
-				    case 3:
+				case 3:
 					new_l3 = this_leaf.size/1024;
 					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-					index_msb = get_count_order(num_threads_sharing);
+					index_msb = get_count_order(
+							num_threads_sharing);
 					l3_id = c->apicid >> index_msb;
 					break;
-				    default:
+				default:
 					break;
 				}
 			}
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
 
-		for ( i = 0 ; i < n ; i++ ) {
+		for (i = 0 ; i < n ; i++) {
 			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
 
 			/* If bit 31 is set, this is an unknown format */
-			for ( j = 0 ; j < 3 ; j++ ) {
-				if (regs[j] & (1 << 31)) regs[j] = 0;
-			}
+			for (j = 0 ; j < 3 ; j++)
+				if (regs[j] & (1 << 31))
+					regs[j] = 0;
 
 			/* Byte 0 is level count, not a descriptor */
-			for ( j = 1 ; j < 16 ; j++ ) {
+			for (j = 1 ; j < 16 ; j++) {
 				unsigned char des = dp[j];
 				unsigned char k = 0;
 
 				/* look up this descriptor in the table */
-				while (cache_table[k].descriptor != 0)
-				{
+				while (cache_table[k].descriptor != 0) {
 					if (cache_table[k].descriptor == des) {
 						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
 							break;
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	}
 
 	if (trace)
-		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-	else if ( l1i )
-		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
+	else if (l1i)
+		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
 
 	if (l1d)
-		printk(", L1 D cache: %dK\n", l1d);
+		printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
 
 	if (l2)
 		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
@@ -558,8 +559,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	}
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+}
+
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+}
 #endif
 
 static void __cpuinit free_cache_attributes(unsigned int cpu)
@@ -645,7 +651,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 static ssize_t show_##file_name						\
 			(struct _cpuid4_info *this_leaf, char *buf)	\
 {									\
-	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
 
 show_one_plus(level, eax.split.level, 0);
@@ -656,7 +662,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
 
 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
 {
-	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
+	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
 }
 
 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -669,7 +675,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	const struct cpumask *mask;
 
 	mask = to_cpumask(this_leaf->shared_cpu_map);
-	n = type?
+	n = type ?
 		cpulist_scnprintf(buf, len-2, mask) :
 		cpumask_scnprintf(buf, len-2, mask);
 	buf[n++] = '\n';
@@ -800,7 +806,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
 	&type.attr,
 	&level.attr,
 	&coherency_line_size.attr,
@@ -815,7 +821,7 @@ static struct attribute * default_attrs[] = {
 	NULL
 };
 
-static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct _cache_attr *fattr = to_attr(attr);
 	struct _index_kobject *this_leaf = to_object(kobj);
@@ -828,8 +834,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 	return ret;
 }
 
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
-		     const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+		     const char *buf, size_t count)
 {
 	struct _cache_attr *fattr = to_attr(attr);
 	struct _index_kobject *this_leaf = to_object(kobj);
@@ -883,7 +889,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		goto err_out;
 
 	per_cpu(index_kobject, cpu) = kzalloc(
-	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
+	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
 	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 		goto err_out;
 
@@ -917,7 +923,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	}
 
 	for (i = 0; i < num_cache_leaves; i++) {
-		this_object = INDEX_KOBJECT_PTR(cpu,i);
+		this_object = INDEX_KOBJECT_PTR(cpu, i);
 		this_object->cpu = cpu;
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
@@ -925,9 +931,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 					      per_cpu(cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
-			for (j = 0; j < i; j++) {
-				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
-			}
+			for (j = 0; j < i; j++)
+				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
 			kobject_put(per_cpu(cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			return retval;
@@ -952,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
-		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
 	kobject_put(per_cpu(cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
@@ -977,8 +982,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
-{
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
 	.notifier_call = cacheinfo_cpu_callback,
 };
 