|
@@ -18,6 +18,7 @@
|
|
|
#include <asm/processor.h>
|
|
|
#include <linux/smp.h>
|
|
|
#include <asm/k8.h>
|
|
|
+#include <asm/smp.h>
|
|
|
|
|
|
#define LVL_1_INST 1
|
|
|
#define LVL_1_DATA 2
|
|
@@ -152,7 +153,8 @@ struct _cpuid4_info {
|
|
|
union _cpuid4_leaf_ebx ebx;
|
|
|
union _cpuid4_leaf_ecx ecx;
|
|
|
unsigned long size;
|
|
|
- unsigned long can_disable;
|
|
|
+ bool can_disable;
|
|
|
+ unsigned int l3_indices;
|
|
|
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
|
|
|
};
|
|
|
|
|
@@ -162,7 +164,8 @@ struct _cpuid4_info_regs {
|
|
|
union _cpuid4_leaf_ebx ebx;
|
|
|
union _cpuid4_leaf_ecx ecx;
|
|
|
unsigned long size;
|
|
|
- unsigned long can_disable;
|
|
|
+ bool can_disable;
|
|
|
+ unsigned int l3_indices;
|
|
|
};
|
|
|
|
|
|
unsigned short num_cache_leaves;
|
|
@@ -292,6 +295,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
|
|
|
(ebx->split.ways_of_associativity + 1) - 1;
|
|
|
}
|
|
|
|
|
|
+struct _cache_attr {
|
|
|
+ struct attribute attr;
|
|
|
+ ssize_t (*show)(struct _cpuid4_info *, char *);
|
|
|
+ ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
|
|
|
+};
|
|
|
+
|
|
|
+#ifdef CONFIG_CPU_SUP_AMD
|
|
|
+static unsigned int __cpuinit amd_calc_l3_indices(void)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * We're called over smp_call_function_single() and therefore
|
|
|
+ * are on the correct cpu.
|
|
|
+ */
|
|
|
+ int cpu = smp_processor_id();
|
|
|
+ int node = cpu_to_node(cpu);
|
|
|
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
|
|
|
+ unsigned int sc0, sc1, sc2, sc3;
|
|
|
+ u32 val = 0;
|
|
|
+
|
|
|
+ pci_read_config_dword(dev, 0x1C4, &val);
|
|
|
+
|
|
|
+ /* calculate subcache sizes */
|
|
|
+ sc0 = !(val & BIT(0));
|
|
|
+ sc1 = !(val & BIT(4));
|
|
|
+ sc2 = !(val & BIT(8)) + !(val & BIT(9));
|
|
|
+ sc3 = !(val & BIT(12)) + !(val & BIT(13));
|
|
|
+
|
|
|
+ return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
|
|
|
+}
|
|
|
+
|
|
|
static void __cpuinit
|
|
|
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
|
|
|
{
|
|
@@ -301,12 +334,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
|
|
|
if (boot_cpu_data.x86 == 0x11)
|
|
|
return;
|
|
|
|
|
|
- /* see erratum #382 */
|
|
|
- if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
|
|
|
+ /* see errata #382 and #388 */
|
|
|
+ if ((boot_cpu_data.x86 == 0x10) &&
|
|
|
+ ((boot_cpu_data.x86_model < 0x8) ||
|
|
|
+ (boot_cpu_data.x86_mask < 0x1)))
|
|
|
return;
|
|
|
|
|
|
- this_leaf->can_disable = 1;
|
|
|
+ this_leaf->can_disable = true;
|
|
|
+ this_leaf->l3_indices = amd_calc_l3_indices();
|
|
|
+}
|
|
|
+
|
|
|
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
|
|
|
+ unsigned int index)
|
|
|
+{
|
|
|
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
|
|
|
+ int node = amd_get_nb_id(cpu);
|
|
|
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
|
|
|
+ unsigned int reg = 0;
|
|
|
+
|
|
|
+ if (!this_leaf->can_disable)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (!dev)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
|
|
|
+ return sprintf(buf, "0x%08x\n", reg);
|
|
|
+}
|
|
|
+
|
|
|
+#define SHOW_CACHE_DISABLE(index) \
|
|
|
+static ssize_t \
|
|
|
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
|
|
|
+{ \
|
|
|
+ return show_cache_disable(this_leaf, buf, index); \
|
|
|
+}
|
|
|
+SHOW_CACHE_DISABLE(0)
|
|
|
+SHOW_CACHE_DISABLE(1)
|
|
|
+
|
|
|
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
|
|
|
+ const char *buf, size_t count, unsigned int index)
|
|
|
+{
|
|
|
+ int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
|
|
|
+ int node = amd_get_nb_id(cpu);
|
|
|
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
|
|
|
+ unsigned long val = 0;
|
|
|
+
|
|
|
+#define SUBCACHE_MASK (3UL << 20)
|
|
|
+#define SUBCACHE_INDEX 0xfff
|
|
|
+
|
|
|
+ if (!this_leaf->can_disable)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (!capable(CAP_SYS_ADMIN))
|
|
|
+ return -EPERM;
|
|
|
+
|
|
|
+ if (!dev)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (strict_strtoul(buf, 10, &val) < 0)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ /* do not allow writes outside of allowed bits */
|
|
|
+ if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
|
|
|
+ ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ val |= BIT(30);
|
|
|
+ pci_write_config_dword(dev, 0x1BC + index * 4, val);
|
|
|
+ /*
|
|
|
+ * We need to WBINVD on a core on the node containing the L3 cache which
|
|
|
+ * indices we disable therefore a simple wbinvd() is not sufficient.
|
|
|
+ */
|
|
|
+ wbinvd_on_cpu(cpu);
|
|
|
+ pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
|
|
|
+ return count;
|
|
|
+}
|
|
|
+
|
|
|
+#define STORE_CACHE_DISABLE(index) \
|
|
|
+static ssize_t \
|
|
|
+store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
|
|
|
+ const char *buf, size_t count) \
|
|
|
+{ \
|
|
|
+ return store_cache_disable(this_leaf, buf, count, index); \
|
|
|
}
|
|
|
+STORE_CACHE_DISABLE(0)
|
|
|
+STORE_CACHE_DISABLE(1)
|
|
|
+
|
|
|
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
|
|
|
+ show_cache_disable_0, store_cache_disable_0);
|
|
|
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
|
|
|
+ show_cache_disable_1, store_cache_disable_1);
|
|
|
+
|
|
|
+#else /* CONFIG_CPU_SUP_AMD */
|
|
|
+static void __cpuinit
|
|
|
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
|
|
|
+{
|
|
|
+};
|
|
|
+#endif /* CONFIG_CPU_SUP_AMD */
|
|
|
|
|
|
static int
|
|
|
__cpuinit cpuid4_cache_lookup_regs(int index,
|
|
@@ -713,82 +837,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
|
|
|
#define to_object(k) container_of(k, struct _index_kobject, kobj)
|
|
|
#define to_attr(a) container_of(a, struct _cache_attr, attr)
|
|
|
|
|
|
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
|
|
|
- unsigned int index)
|
|
|
-{
|
|
|
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
|
|
|
- int node = cpu_to_node(cpu);
|
|
|
- struct pci_dev *dev = node_to_k8_nb_misc(node);
|
|
|
- unsigned int reg = 0;
|
|
|
-
|
|
|
- if (!this_leaf->can_disable)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (!dev)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
|
|
|
- return sprintf(buf, "%x\n", reg);
|
|
|
-}
|
|
|
-
|
|
|
-#define SHOW_CACHE_DISABLE(index) \
|
|
|
-static ssize_t \
|
|
|
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
|
|
|
-{ \
|
|
|
- return show_cache_disable(this_leaf, buf, index); \
|
|
|
-}
|
|
|
-SHOW_CACHE_DISABLE(0)
|
|
|
-SHOW_CACHE_DISABLE(1)
|
|
|
-
|
|
|
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
|
|
|
- const char *buf, size_t count, unsigned int index)
|
|
|
-{
|
|
|
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
|
|
|
- int node = cpu_to_node(cpu);
|
|
|
- struct pci_dev *dev = node_to_k8_nb_misc(node);
|
|
|
- unsigned long val = 0;
|
|
|
- unsigned int scrubber = 0;
|
|
|
-
|
|
|
- if (!this_leaf->can_disable)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (!capable(CAP_SYS_ADMIN))
|
|
|
- return -EPERM;
|
|
|
-
|
|
|
- if (!dev)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (strict_strtoul(buf, 10, &val) < 0)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- val |= 0xc0000000;
|
|
|
-
|
|
|
- pci_read_config_dword(dev, 0x58, &scrubber);
|
|
|
- scrubber &= ~0x1f000000;
|
|
|
- pci_write_config_dword(dev, 0x58, scrubber);
|
|
|
-
|
|
|
- pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
|
|
|
- wbinvd();
|
|
|
- pci_write_config_dword(dev, 0x1BC + index * 4, val);
|
|
|
- return count;
|
|
|
-}
|
|
|
-
|
|
|
-#define STORE_CACHE_DISABLE(index) \
|
|
|
-static ssize_t \
|
|
|
-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
|
|
|
- const char *buf, size_t count) \
|
|
|
-{ \
|
|
|
- return store_cache_disable(this_leaf, buf, count, index); \
|
|
|
-}
|
|
|
-STORE_CACHE_DISABLE(0)
|
|
|
-STORE_CACHE_DISABLE(1)
|
|
|
-
|
|
|
-struct _cache_attr {
|
|
|
- struct attribute attr;
|
|
|
- ssize_t (*show)(struct _cpuid4_info *, char *);
|
|
|
- ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
|
|
|
-};
|
|
|
-
|
|
|
#define define_one_ro(_name) \
|
|
|
static struct _cache_attr _name = \
|
|
|
__ATTR(_name, 0444, show_##_name, NULL)
|
|
@@ -803,23 +851,28 @@ define_one_ro(size);
|
|
|
define_one_ro(shared_cpu_map);
|
|
|
define_one_ro(shared_cpu_list);
|
|
|
|
|
|
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
|
|
|
- show_cache_disable_0, store_cache_disable_0);
|
|
|
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
|
|
|
- show_cache_disable_1, store_cache_disable_1);
|
|
|
+#define DEFAULT_SYSFS_CACHE_ATTRS \
|
|
|
+ &type.attr, \
|
|
|
+ &level.attr, \
|
|
|
+ &coherency_line_size.attr, \
|
|
|
+ &physical_line_partition.attr, \
|
|
|
+ &ways_of_associativity.attr, \
|
|
|
+ &number_of_sets.attr, \
|
|
|
+ &size.attr, \
|
|
|
+ &shared_cpu_map.attr, \
|
|
|
+ &shared_cpu_list.attr
|
|
|
|
|
|
static struct attribute *default_attrs[] = {
|
|
|
- &type.attr,
|
|
|
- &level.attr,
|
|
|
- &coherency_line_size.attr,
|
|
|
- &physical_line_partition.attr,
|
|
|
- &ways_of_associativity.attr,
|
|
|
- &number_of_sets.attr,
|
|
|
- &size.attr,
|
|
|
- &shared_cpu_map.attr,
|
|
|
- &shared_cpu_list.attr,
|
|
|
+ DEFAULT_SYSFS_CACHE_ATTRS,
|
|
|
+ NULL
|
|
|
+};
|
|
|
+
|
|
|
+static struct attribute *default_l3_attrs[] = {
|
|
|
+ DEFAULT_SYSFS_CACHE_ATTRS,
|
|
|
+#ifdef CONFIG_CPU_SUP_AMD
|
|
|
&cache_disable_0.attr,
|
|
|
&cache_disable_1.attr,
|
|
|
+#endif
|
|
|
NULL
|
|
|
};
|
|
|
|
|
@@ -910,6 +963,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
|
|
|
unsigned int cpu = sys_dev->id;
|
|
|
unsigned long i, j;
|
|
|
struct _index_kobject *this_object;
|
|
|
+ struct _cpuid4_info *this_leaf;
|
|
|
int retval;
|
|
|
|
|
|
retval = cpuid4_cache_sysfs_init(cpu);
|
|
@@ -928,6 +982,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
|
|
|
this_object = INDEX_KOBJECT_PTR(cpu, i);
|
|
|
this_object->cpu = cpu;
|
|
|
this_object->index = i;
|
|
|
+
|
|
|
+ this_leaf = CPUID4_INFO_IDX(cpu, i);
|
|
|
+
|
|
|
+ if (this_leaf->can_disable)
|
|
|
+ ktype_cache.default_attrs = default_l3_attrs;
|
|
|
+ else
|
|
|
+ ktype_cache.default_attrs = default_attrs;
|
|
|
+
|
|
|
retval = kobject_init_and_add(&(this_object->kobj),
|
|
|
&ktype_cache,
|
|
|
per_cpu(ici_cache_kobject, cpu),
|