@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-struct scrubrate {
+static const struct scrubrate {
 	u32 scrubval;		/* bit pattern for scrub rate */
 	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
 } scrubrates[] = {
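
The driver's set path (near the amd64_get_scrub_rate() hunk below) consumes this table by scanning for the first entry whose bandwidth does not exceed the requested value, falling back to the last entry when nothing matches. A minimal sketch of that lookup, assuming the table stays sorted from highest to lowest bandwidth as it is today (pick_scrubval is illustrative, not a driver function):

	/* Illustrative only: map a requested bandwidth to a scrubval bit pattern. */
	static u32 pick_scrubval(u32 new_bw)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
			if (scrubrates[i].bandwidth <= new_bw)
				break;	/* first rate at or below the request */

		return scrubrates[i].scrubval;
	}

Making the table static const costs nothing at runtime and lets the compiler place it in read-only data.
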
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
  * DRAM base/limit associated with node_id
  */
 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
-				   unsigned nid)
+				   u8 nid)
 {
 	u64 addr;
 
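
Only the signature changes here; the body elided by the hunk masks sys_addr down to the implemented width and compares it against the node's DRAM window. The gist, assuming the driver's get_dram_base()/get_dram_limit() accessors (a sketch, not the verbatim body):

	u64 addr = sys_addr & 0x000000ffffffffffULL;	/* K8 implements 40 bits */

	return (addr >= get_dram_base(pvt, nid)) &&
	       (addr <= get_dram_limit(pvt, nid));

A node id in this path indexes per-node structures with far fewer than 256 entries, so u8 is wide enough.
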
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 						u64 sys_addr)
 {
 	struct amd64_pvt *pvt;
-	unsigned node_id;
+	u8 node_id;
 	u32 intlv_en, bits;
 
 	/*
@@ -939,7 +939,8 @@ static u64 get_error_address(struct mce *m)
 	struct amd64_pvt *pvt;
 	u64 cc6_base, tmp_addr;
 	u32 tmp;
-	u8 mce_nid, intlv_en;
+	u16 mce_nid;
+	u8 intlv_en;
 
 	if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
 		return addr;
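
The magic test at the end of that hunk checks whether the reported address lies in the fixed CC6 save-area window: bits [47:24] must equal 0x00fdf7, i.e. addresses 0x00fd_f700_0000 through 0x00fd_f7ff_ffff get translated, everything else is returned unchanged. Note that, judging by these uses, GENMASK() here is the driver's local (lo, hi) variant, not the later generic (hi, lo) macro:

	/*
	 * GENMASK(24, 47) in this driver selects bits [47:24].
	 *
	 *   addr = 0x00fd_f7ab_cdef
	 *   (addr & GENMASK(24, 47)) >> 24 = 0x00fdf7  -> CC6 window, translate
	 *
	 *   addr = 0x0123_4500_0000
	 *   (addr & GENMASK(24, 47)) >> 24 = 0x012345  -> not CC6, return as-is
	 */
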
@@ -979,10 +980,29 @@ static u64 get_error_address(struct mce *m)
 	return addr;
 }
 
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+						unsigned int device,
+						struct pci_dev *related)
+{
+	struct pci_dev *dev = NULL;
+
+	while ((dev = pci_get_device(vendor, device, dev))) {
+		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+		    (dev->bus->number == related->bus->number) &&
+		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+			break;
+	}
+
+	return dev;
+}
+
 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
+	struct amd_northbridge *nb;
+	struct pci_dev *misc, *f1 = NULL;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	int off = range << 3;
+	u32 llim;
 
 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
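
pci_get_related_function() moves up the file (its old copy is deleted in a later hunk) and gains a pci_domain_nr() check: it now returns the device with the requested vendor/device ID sitting in the same PCI domain, bus and slot as `related`, not merely the same bus and slot. The next hunk is the first user; the call pattern is:

	struct pci_dev *misc = nb->misc;	/* the node's F3 (misc) device */
	struct pci_dev *f1;

	f1 = pci_get_related_function(misc->vendor,
				      PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
	if (f1) {
		/* ... read F1 config space ... */
		pci_dev_put(f1);	/* drop the ref pci_get_device() took */
	}

Without the domain check, a multi-domain machine whose domains reuse the same bus/slot numbers could hand back the wrong node's F1.
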
@@ -996,30 +1016,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 
-	/* Factor in CC6 save area by reading dst node's limit reg */
-	if (c->x86 == 0x15) {
-		struct pci_dev *f1 = NULL;
-		u8 nid = dram_dst_node(pvt, range);
-		u32 llim;
+	/* F15h: factor in CC6 save area by reading dst node's limit reg */
+	if (c->x86 != 0x15)
+		return;
 
-		f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
-		if (WARN_ON(!f1))
-			return;
+	nb = node_to_amd_nb(dram_dst_node(pvt, range));
+	if (WARN_ON(!nb))
+		return;
 
-		amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+	misc = nb->misc;
+	f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+	if (WARN_ON(!f1))
+		return;
 
-		pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
 
-		/* {[39:27],111b} */
-		pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+	pvt->ranges[range].lim.lo &= GENMASK(0, 15);
 
-		pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+	/* {[39:27],111b} */
+	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
 
-		/* [47:40] */
-		pvt->ranges[range].lim.hi |= llim >> 13;
+	pvt->ranges[range].lim.hi &= GENMASK(0, 7);
 
-		pci_dev_put(f1);
-	}
+	/* [47:40] */
+	pvt->ranges[range].lim.hi |= llim >> 13;
+
+	pci_dev_put(f1);
 }
 
 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
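
The `{[39:27],111b}` composition is easier to follow with numbers. The low 13 bits of llim carry the local node limit's address bits [39:27] (128MB granularity); the bits above supply [47:40]:

	/*
	 * llim                = 0x0004
	 * llim & 0x1fff       = 0x0004      -> limit bits [39:27]
	 * (0x0004 << 3) | 0x7 = 0x0027      -> limit bits [39:24], [26:24] = 111b
	 * 0x0027 << 16        = 0x00270000  -> merged into lim.lo[31:16]
	 * llim >> 13          = 0x00        -> limit bits [47:40] (lim.hi)
	 */

With the low 24 address bits implicitly all ones when the limit is decoded (as the driver's limit accessor does it), the range now ends at 0x27ff_ffff, the very top of the 128MB block holding the CC6 save area, as the comment in the old code said.
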
@@ -1305,7 +1327,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
 }
 
 /* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
 				 u64 sys_addr, bool hi_rng,
 				 u32 dct_sel_base_addr)
 {
@@ -1381,7 +1403,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
  * -EINVAL: NOT FOUND
  * 0..csrow = Chip-Select Row
  */
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
@@ -1672,23 +1694,6 @@ static struct amd64_family_type amd64_family_types[] = {
 	},
 };
 
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
-						unsigned int device,
-						struct pci_dev *related)
-{
-	struct pci_dev *dev = NULL;
-
-	dev = pci_get_device(vendor, device, dev);
-	while (dev) {
-		if ((dev->bus->number == related->bus->number) &&
-		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
-			break;
-		dev = pci_get_device(vendor, device, dev);
-	}
-
-	return dev;
-}
-
 /*
  * These are tables of eigenvectors (one per line) which can be used for the
  * construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1701,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
  *
  * Algorithm courtesy of Ross LaFetra from AMD.
  */
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
 	0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1740,7 @@ static u16 x4_vectors[] = {
 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
 };
 
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1762,7 @@ static u16 x8_vectors[] = {
 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
 };
 
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
 			   unsigned v_dim)
 {
 	unsigned int i, err_sym;
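
Constifying the tables is what forces the const u16 * in decode_syndrome(). For orientation: each error symbol owns v_dim consecutive entries, and the search asks whether the syndrome XOR-reduces to zero using only that symbol's vectors. A compressed restatement of the inner loop, relying on the property visible in the tables themselves (within a symbol's group, each successive vector has a strictly higher lowest-set bit) — a sketch, not the driver's exact routine:

	/* Illustrative: can 'syndrome' be cancelled using one symbol's vectors? */
	static bool reduces_to_zero(u16 syndrome, const u16 *v, unsigned v_dim)
	{
		u16 s = syndrome;
		unsigned bit, i = 0;

		for (bit = 0; bit < 16 && i < v_dim; bit++) {
			if (v[i] & (1U << bit)) {	/* v[i]'s leading bit */
				if (s & (1U << bit))
					s ^= v[i];	/* eliminate it from s */
				i++;
			} else if (s & (1U << bit)) {
				return false;	/* no vector can clear this bit */
			}
		}

		return s == 0;
	}

decode_syndrome() returns the index of the first symbol for which this elimination succeeds; that is the symbol in error.
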
@@ -2181,7 +2186,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 }
 
 /* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
 {
 	int cpu;
 
@@ -2191,7 +2196,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
 }
 
 /* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
 {
 	cpumask_var_t mask;
 	int cpu, nbe;
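
The hunks only show the prototypes; both helpers follow the usual cpumask pattern of collecting every online CPU on the given node and then inspecting or poking MSRs on exactly that set. A sketch of what the collection step plausibly looks like, assuming the arch's amd_get_nb_id() helper (which reports the northbridge a CPU sits behind):

	static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
	{
		int cpu;

		for_each_online_cpu(cpu)
			if (amd_get_nb_id(cpu) == nid)
				cpumask_set_cpu(cpu, mask);
	}

Comparing that id against a u16 nid is what keeps the whole chain below (toggle/enable/restore) consistent after the widening.
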
@@ -2224,7 +2229,7 @@ out:
 	return ret;
 }
 
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
 {
 	cpumask_var_t cmask;
 	int cpu;
@@ -2262,7 +2267,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
 	return 0;
 }
 
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
 				       struct pci_dev *F3)
 {
 	bool ret = true;
@@ -2314,7 +2319,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
 	return ret;
 }
 
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
 					struct pci_dev *F3)
 {
 	u32 value, mask = 0x3;	/* UECC/CECC enable */
@@ -2353,7 +2358,7 @@ static const char *ecc_msg =
 	"'ecc_enable_override'.\n"
 	" (Note that use of the override may cause unknown side effects.)\n";
 
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 {
 	u32 value;
 	u8 ecc_en = 0;
@@ -2474,7 +2479,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 	struct mem_ctl_info *mci = NULL;
 	struct edac_mc_layer layers[2];
 	int err = 0, ret;
-	u8 nid = get_node_id(F2);
+	u16 nid = amd_get_node_id(F2);
 
 	ret = -ENOMEM;
 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2566,7 +2571,7 @@ err_ret:
 static int amd64_probe_one_instance(struct pci_dev *pdev,
 				    const struct pci_device_id *mc_type)
 {
-	u8 nid = get_node_id(pdev);
+	u16 nid = amd_get_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct ecc_settings *s;
 	int ret = 0;
@@ -2616,7 +2621,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
-	u8 nid = get_node_id(pdev);
+	u16 nid = amd_get_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct ecc_settings *s = ecc_stngs[nid];
 
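
The remaining churn is mechanical: every get_node_id() caller becomes amd_get_node_id() and carries a u16. The old helper was driver-local and tied to the legacy layout (AMD northbridge functions at device 0x18 + node), which cannot work across PCI domains; amd_get_node_id() presumably lives alongside the node_to_amd_nb()/amd_nb_num() plumbing this diff already uses. A hypothetical sketch of its shape, applying the same domain/slot matching rules as pci_get_related_function() above (an assumption, not the verbatim helper):

	static u16 amd_get_node_id(struct pci_dev *pdev)
	{
		struct pci_dev *misc;
		u16 i;

		/* Find the cached NB whose F3 shares domain and slot with pdev. */
		for (i = 0; i < amd_nb_num(); i++) {
			misc = node_to_amd_nb(i)->misc;
			if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
			    PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
				return i;
		}

		WARN(1, "Unable to find AMD Northbridge id for %s\n",
		     pci_name(pdev));
		return 0;
	}

Returning u16 rather than u8 is what ripples through the ecc_stngs indexing and the ECC reporting paths above.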