@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
 
 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
-static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
-static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
+static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
  * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-	return csrow >> (pvt->num_dcsm >> 3);
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+		return csrow;
+	else
+		return csrow >> 1;
 }
 
 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	intlv_en = pvt->dram_IntlvEn[0];
 
 	if (intlv_en == 0) {
-		for (node_id = 0; ; ) {
+		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
 			if (amd64_base_limit_match(pvt, sys_addr, node_id))
-				break;
-
-			if (++node_id >= DRAM_REG_COUNT)
-				goto err_no_match;
+				goto found;
 		}
-		goto found;
+		goto err_no_match;
 	}
 
-	if (unlikely((intlv_en != (0x01 << 8)) &&
-		     (intlv_en != (0x03 << 8)) &&
-		     (intlv_en != (0x07 << 8)))) {
+	if (unlikely((intlv_en != 0x01) &&
+		     (intlv_en != 0x03) &&
+		     (intlv_en != 0x07))) {
 		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
 			     "IntlvEn field of DRAM Base Register for node 0: "
-			     "This probably indicates a BIOS bug.\n", intlv_en);
+			     "this probably indicates a BIOS bug.\n", intlv_en);
 		return NULL;
 	}
 
 	bits = (((u32) sys_addr) >> 12) & intlv_en;
 
 	for (node_id = 0; ; ) {
-		if ((pvt->dram_limit[node_id] & intlv_en) == bits)
+		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
 			break;	/* intlv_sel field matches */
 
 		if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	/* sanity test for sys_addr */
 	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
 		amd64_printk(KERN_WARNING,
-			     "%s(): sys_addr 0x%lx falls outside base/limit "
-			     "address range for node %d with node interleaving "
-			     "enabled.\n", __func__, (unsigned long)sys_addr,
-			     node_id);
+			     "%s(): sys_addr 0x%llx falls outside base/limit "
+			     "address range for node %d with node interleaving "
+			     "enabled.\n",
+			     __func__, sys_addr, node_id);
 		return NULL;
 	}
 
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 	 * base/mask register pair, test the condition shown near the start of
 	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
 	 */
-	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
 		/* This DRAM chip select is disabled on this node */
 		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 	u64 base, mask;
 
 	pvt = mci->pvt_info;
-	BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
+	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
 
 	base = base_from_dct_base(pvt, csrow);
 	mask = mask_from_dct_mask(pvt, csrow);
@@ -962,35 +962,27 @@ err_reg:
  */
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
-	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+		pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
+		pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
+		pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
+		pvt->dcs_shift = REV_E_DCS_SHIFT;
+		pvt->cs_count = 8;
+		pvt->num_dcsm = 8;
+	} else {
 		pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
 		pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
 		pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
 
-		switch (boot_cpu_data.x86) {
-		case 0xf:
-			pvt->num_dcsm = REV_F_DCSM_COUNT;
-			break;
-
-		case 0x10:
-			pvt->num_dcsm = F10_DCSM_COUNT;
-			break;
-
-		case 0x11:
-			pvt->num_dcsm = F11_DCSM_COUNT;
-			break;
-
-		default:
-			amd64_printk(KERN_ERR, "Unsupported family!\n");
-			break;
+		if (boot_cpu_data.x86 == 0x11) {
+			pvt->cs_count = 4;
+			pvt->num_dcsm = 2;
+		} else {
+			pvt->cs_count = 8;
+			pvt->num_dcsm = 4;
 		}
-	} else {
-		pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
-		pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
-		pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
-		pvt->dcs_shift = REV_E_DCS_SHIFT;
-		pvt->num_dcsm = REV_E_DCSM_COUNT;
 	}
 }
 
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	amd64_set_dct_base_and_mask(pvt);
 
-	for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
+	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
 		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
 					    &pvt->dcsb0[cs]);
@@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
 
 	/* Extract parts into separate data entries */
-	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
+	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
@@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
 	 * location of the region, so lower 24 bits need to be all ones
 	 */
-	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
+	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF;
 	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
 	pvt->dram_DstNode[dram] = (low & 0x7);
 }
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 	 * different from the node that detected the error.
 	 */
 	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
-	if (src_mci) {
+	if (!src_mci) {
 		amd64_mc_printk(mci, KERN_ERR,
 			     "failed to map error address 0x%lx to a node\n",
 			     (unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 
 	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
 
-	pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
-				((u64) low_base & 0xFFFF0000))) << 8;
+	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
+			       (((u64)low_base & 0xFFFF0000) << 24);
 
 	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
 	 * memory location of the region, so low 24 bits need to be all ones.
 	 */
-	low_limit |= 0x0000FFFF;
-	pvt->dram_limit[dram] =
-		((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
+	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
+				(((u64) low_limit & 0xFFFF0000) << 24) |
+				0x00FFFFFF;
 }
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
 
 	debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
 
-	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
 		cs_base = amd64_get_dct_base(pvt, cs, csrow);
 		if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2497,7 +2489,7 @@ err_reg:
  * NOTE: CPU Revision Dependent code
  *
  * Input:
- * @csrow_nr	ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
+ * @csrow_nr	ChipSelect Row Number (0..pvt->cs_count-1)
  *	k8 private pointer to -->
  *			DRAM Bank Address mapping register
  *			node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
 		);
 
-	for (i = 0; i < CHIPSELECT_COUNT; i++) {
+	for (i = 0; i < pvt->cs_count; i++) {
 		csrow = &mci->csrows[i];
 
 		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
 		goto err_exit;
 
 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
+	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
 	if (!mci)
 		goto err_exit;
 
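
The amd64_map_to_dcs_mask() hunk above can be read in isolation: pre-revision-F K8 parts keep one DCS mask register per chip select, while revision F and the later families pair two chip selects per mask register. The following is a minimal stand-alone sketch of that mapping, not the driver function itself; the pre-rev-F condition is passed in as a flag instead of being derived from boot_cpu_data and ext_model.

	#include <stdio.h>

	/*
	 * Illustration of the new csrow -> DCS mask mapping: pre-rev-F K8 has
	 * one mask per chip select, rev F and later families share one mask
	 * register between each pair of chip selects.
	 */
	static unsigned int map_to_dcs_mask(int pre_rev_f_k8, unsigned int csrow)
	{
		if (pre_rev_f_k8)
			return csrow;		/* one mask per chip select */

		return csrow >> 1;		/* two chip selects per mask */
	}

	int main(void)
	{
		unsigned int csrow;

		for (csrow = 0; csrow < 8; csrow++)
			printf("csrow %u -> mask %u (rev F and later)\n",
			       csrow, map_to_dcs_mask(0, csrow));
		return 0;
	}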
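Likewise, the find_mc_by_sys_addr() hunk boils down to: the IntlvEn field (already shifted down to bits 2:0) is validated against 0x01/0x03/0x07, the interleave selector bits are taken from sys_addr starting at bit 12, and the node whose IntlvSel field matches those bits is selected. Below is a stand-alone sketch of that selection logic under the assumption that DRAM_REG_COUNT is 8; pick_node() is a hypothetical helper used only for illustration, not a driver function.

	#include <stdint.h>
	#include <stdio.h>

	#define DRAM_REG_COUNT 8	/* assumed number of DRAM base/limit pairs */

	/* Return the node whose IntlvSel matches the selector bits, or -1. */
	static int pick_node(uint64_t sys_addr, unsigned int intlv_en,
			     const unsigned int intlv_sel[DRAM_REG_COUNT])
	{
		unsigned int bits, node_id;

		if (intlv_en != 0x01 && intlv_en != 0x03 && intlv_en != 0x07)
			return -1;			/* junk IntlvEn value */

		/* selector bits of the system address start at bit 12 */
		bits = ((uint32_t)sys_addr >> 12) & intlv_en;

		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++)
			if ((intlv_sel[node_id] & intlv_en) == bits)
				return node_id;		/* IntlvSel matches */

		return -1;
	}

	int main(void)
	{
		/* 2-node interleave: IntlvEn = 0x01, nodes select on address bit 12 */
		unsigned int sel[DRAM_REG_COUNT] = { 0x0, 0x1 };

		printf("0x1000 -> node %d\n", pick_node(0x1000, 0x01, sel)); /* bit 12 set   -> 1 */
		printf("0x2000 -> node %d\n", pick_node(0x2000, 0x01, sel)); /* bit 12 clear -> 0 */
		return 0;
	}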