@@ -263,8 +263,8 @@ static const struct ipath_cregs ipath_pe_cregs = {
 };
 
 /* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK 0x1F
-#define INFINIPATH_I_RCVAVAIL_MASK 0x1F
+#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
+#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
 
 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
 #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
@@ -294,6 +294,33 @@ static const struct ipath_cregs ipath_pe_cregs = {
 #define IPATH_GPIO_SCL (1ULL << \
 	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
 
+/*
+ * Rev2 silicon allows suppressing the check for ArmLaunch errors.
+ * This can speed up short packet sends on systems that do
+ * not guarantee write ordering.
+ */
+#define INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR (1ULL<<63)
+
+/* 6120 specific hardware errors... */
+static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
+	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
+	INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
+	/*
+	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
+	 * parity or memory parity error failures, because most likely we
+	 * won't be able to talk to the core of the chip. Nonetheless, we
+	 * might see them, if they are in parts of the PCIe core that aren't
+	 * essential.
+	 */
+	INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
+	INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
+	INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
+	INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
+	INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
+	INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
+	INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
+};
+
 /**
  * ipath_pe_handle_hwerrors - display hardware errors.
  * @dd: the infinipath device
@@ -343,19 +370,49 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 	 * make sure we get this much out, unless told to be quiet,
 	 * or it's occurred within the last 5 seconds
 	 */
-	if ((hwerrs & ~dd->ipath_lasthwerror) ||
+	if ((hwerrs & ~(dd->ipath_lasthwerror |
+			((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
+			  INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
+			 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
 	    (ipath_debug & __IPATH_VERBDBG))
 		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
 			 "(cleared)\n", (unsigned long long) hwerrs);
 	dd->ipath_lasthwerror |= hwerrs;
 
-	if (hwerrs & ~infinipath_hwe_bitsextant)
+	if (hwerrs & ~dd->ipath_hwe_bitsextant)
 		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
 			      "%llx set\n", (unsigned long long)
-			      (hwerrs & ~infinipath_hwe_bitsextant));
+			      (hwerrs & ~dd->ipath_hwe_bitsextant));
 
 	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
 	if (ctrl & INFINIPATH_C_FREEZEMODE) {
+		/*
+		 * Parity errors in send memory are recoverable:
+		 * just cancel the send (if indicated in sendbuffererror),
+		 * count the occurrence, unfreeze (if no other handled
+		 * hardware error bits are set), and continue. They can
+		 * occur if a processor speculative read is done to the PIO
+		 * buffer while we are sending a packet, for example.
+		 */
+		if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
+			       INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
+			      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
+			ipath_stats.sps_txeparity++;
+			ipath_dbg("Recovering from TXE parity error (%llu), "
+				  "hwerrstatus=%llx\n",
+				  (unsigned long long) ipath_stats.sps_txeparity,
+				  (unsigned long long) hwerrs);
+			ipath_disarm_senderrbufs(dd);
+			hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
+				     INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
+				    << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
+			if (!hwerrs) { /* else leave in freeze mode */
+				ipath_write_kreg(dd,
+						 dd->ipath_kregs->kr_control,
+						 dd->ipath_control);
+				return;
+			}
+		}
 		if (hwerrs) {
 			/*
 			 * if any set that we aren't ignoring only make the
@@ -379,9 +436,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 		} else {
 			ipath_dbg("Clearing freezemode on ignored hardware "
 				  "error\n");
-			ctrl &= ~INFINIPATH_C_FREEZEMODE;
 			ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-					 ctrl);
+					 dd->ipath_control);
 		}
 	}
 
@@ -396,24 +452,13 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
 				 dd->ipath_hwerrmask);
 	}
-	if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
+
+	ipath_format_hwerrors(hwerrs,
+			      ipath_6120_hwerror_msgs,
+			      sizeof(ipath_6120_hwerror_msgs)/
+			      sizeof(ipath_6120_hwerror_msgs[0]),
+			      msg, msgl);
+
 	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
 		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
 		bits = (u32) ((hwerrs >>
@@ -423,10 +468,6 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 			 "[PCIe Mem Parity Errs %x] ", bits);
 		strlcat(msg, bitsmsg, msgl);
 	}
-	if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
-		strlcat(msg, "[IB2IPATH Parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
-		strlcat(msg, "[IPATH2IB Parity]", msgl);
 
 #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
 			 INFINIPATH_HWE_COREPLL_RFSLIP )
@@ -452,34 +493,6 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
 			 dd->ipath_hwerrmask);
 	}
 
-	if (hwerrs & INFINIPATH_HWE_PCIEPOISONEDTLP)
-		strlcat(msg, "[PCIe Poisoned TLP]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIECPLTIMEOUT)
-		strlcat(msg, "[PCIe completion timeout]", msgl);
-
-	/*
-	 * In practice, it's unlikely wthat we'll see PCIe PLL, or bus
-	 * parity or memory parity error failures, because most likely we
-	 * won't be able to talk to the core of the chip. Nonetheless, we
-	 * might see them, if they are in parts of the PCIe core that aren't
-	 * essential.
-	 */
-	if (hwerrs & INFINIPATH_HWE_PCIE1PLLFAILED)
-		strlcat(msg, "[PCIePLL1]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIE0PLLFAILED)
-		strlcat(msg, "[PCIePLL0]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXTLH)
-		strlcat(msg, "[PCIe XTLH core parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXADM)
-		strlcat(msg, "[PCIe ADM TX core parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYRADM)
-		strlcat(msg, "[PCIe ADM RX core parity]", msgl);
-
-	if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
-		strlcat(msg, "[Rx Dsync]", msgl);
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
-		strlcat(msg, "[SerDes PLL]", msgl);
-
 	ipath_dev_err(dd, "%s hardware error\n", msg);
 	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
 		/*
@@ -525,6 +538,9 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
 	case 5:
 		n = "InfiniPath_QMH7140";
 		break;
+	case 6:
+		n = "InfiniPath_QLE7142";
+		break;
 	default:
 		ipath_dev_err(dd,
 			      "Don't yet know about board with ID %u\n",
@@ -571,9 +587,12 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
 	if (!dd->ipath_boardrev)	// no PLL for Emulator
 		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
 
-	/* workaround bug 9460 in internal interface bus parity checking */
-	val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
-
+	if (dd->ipath_minrev < 2) {
+		/* workaround bug 9460 in internal interface bus parity
+		 * checking. Fixed (HW bug 9490) in Rev2.
+		 */
+		val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
+	}
 	dd->ipath_hwerrmask = val;
 }
 
@@ -583,8 +602,8 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
  */
 static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 {
-	u64 val, tmp, config1;
-	int ret = 0, change = 0;
+	u64 val, tmp, config1, prev_val;
+	int ret = 0;
 
 	ipath_dbg("Trying to bringup serdes\n");
 
@@ -641,6 +660,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+	prev_val = val;
 	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
 	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
 		val &=
@@ -648,11 +668,9 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 			  INFINIPATH_XGXS_MDIOADDR_SHIFT);
 		/* MDIO address 3 */
 		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
-		change = 1;
 	}
 	if (val & INFINIPATH_XGXS_RESET) {
 		val &= ~INFINIPATH_XGXS_RESET;
-		change = 1;
 	}
 	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
 	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
@@ -661,9 +679,19 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 			  INFINIPATH_XGXS_RX_POL_SHIFT);
 		val |= dd->ipath_rx_pol_inv <<
 			INFINIPATH_XGXS_RX_POL_SHIFT;
-		change = 1;
 	}
-	if (change)
+	if (dd->ipath_minrev >= 2) {
+		/* Rev 2 can tolerate multiple writes to PBC, and
+		 * allowing them can provide lower latency on some
+		 * CPUs, but this feature is off by default, only
+		 * turned on by setting D63 of the XGXSconfig reg.
+		 * May want to make this conditional more
+		 * fine-grained in future. This is not exactly
+		 * related to XGXS, but that is where the bit ended up.
+		 */
+		val |= INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR;
+	}
+	if (val != prev_val)
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
 
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
@@ -717,9 +745,25 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
 }
 
-/* this is not yet needed on this chip, so just return 0. */
 static int ipath_pe_intconfig(struct ipath_devdata *dd)
 {
+	u64 val;
+	u32 chiprev;
+
+	/*
+	 * If the chip supports added error indication via GPIO pins,
+	 * enable interrupts on those bits so the interrupt routine
+	 * can count the events. Also set flag so interrupt routine
+	 * can know they are expected.
+	 */
+	chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
+	if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
+		/* Rev2+ reports extra errors via internal GPIO pins */
+		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
+		val |= IPATH_GPIO_ERRINTR_MASK;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, val);
+	}
 	return 0;
 }
 
@@ -853,21 +897,23 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
 	return 0;
 }
 
-static void ipath_init_pe_variables(void)
+static void ipath_init_pe_variables(struct ipath_devdata *dd)
 {
 	/*
 	 * bits for selecting i2c direction and values,
 	 * used for I2C serial flash
 	 */
-	ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-	ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-	ipath_gpio_sda = IPATH_GPIO_SDA;
-	ipath_gpio_scl = IPATH_GPIO_SCL;
+	dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+	dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+	dd->ipath_gpio_sda = IPATH_GPIO_SDA;
+	dd->ipath_gpio_scl = IPATH_GPIO_SCL;
 
 	/* variables for sanity checking interrupt and errors */
-	infinipath_hwe_bitsextant =
+	dd->ipath_hwe_bitsextant =
 		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
 		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
+		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
 		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
 		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
 		INFINIPATH_HWE_PCIE1PLLFAILED |
@@ -883,13 +929,13 @@ static void ipath_init_pe_variables(void)
 		INFINIPATH_HWE_SERDESPLLFAILED |
 		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
 		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-	infinipath_i_bitsextant =
+	dd->ipath_i_bitsextant =
 		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
 		(INFINIPATH_I_RCVAVAIL_MASK <<
 		 INFINIPATH_I_RCVAVAIL_SHIFT) |
 		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
 		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-	infinipath_e_bitsextant =
+	dd->ipath_e_bitsextant =
 		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
 		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
 		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
@@ -907,8 +953,8 @@ static void ipath_init_pe_variables(void)
 		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
 		INFINIPATH_E_HARDWARE;
 
-	infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-	infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+	dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+	dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
 }
 
 /* setup the MSI stuff again after a reset. I'd like to just call
@@ -1082,6 +1128,45 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
 	mmiowb();
 	spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
 }
+/**
+ * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
+ * @dd: the infinipath device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @tidtype: 0 for eager, 1 for expected
+ * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
+			       u32 type, unsigned long pa)
+{
+	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+
+	if (pa != dd->ipath_tidinvalid) {
+		if (pa & ((1U << 11) - 1)) {
+			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
+				 "not 2KB aligned!\n", pa);
+			return;
+		}
+		pa >>= 11;
+		/* paranoia check */
+		if (pa & (7<<29))
+			ipath_dev_err(dd,
+				      "BUG: Physical page address 0x%lx "
+				      "has bits set in 31-29\n", pa);
+
+		if (type == 0)
+			pa |= dd->ipath_tidtemplate;
+		else /* for now, always full 4KB page */
+			pa |= 2 << 29;
+	}
+	if (dd->ipath_kregbase)
+		writel(pa, tidp32);
+	mmiowb();
+}
+
 
 /**
  * ipath_pe_clear_tid - clear all TID entries for a port, expected and eager
@@ -1203,7 +1288,7 @@ int __attribute__((weak)) ipath_unordered_wc(void)
 
 /**
  * ipath_init_pe_get_base_info - set chip-specific flags for user code
- * @dd: the infinipath device
+ * @pd: the infinipath port
  * @kbase: ipath_base_info pointer
  *
  * We set the PCIE flag because the lower bandwidth on PCIe vs
@@ -1212,6 +1297,7 @@ int __attribute__((weak)) ipath_unordered_wc(void)
 static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
 {
 	struct ipath_base_info *kinfo = kbase;
+	struct ipath_devdata *dd;
 
 	if (ipath_unordered_wc()) {
 		kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
@@ -1220,8 +1306,20 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
 	else
 		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
 
-	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
+	if (pd == NULL)
+		goto done;
 
+	dd = pd->port_dd;
+
+	if (dd != NULL && dd->ipath_minrev >= 2) {
+		ipath_cdbg(PROC, "IBA6120 Rev2, allow multiple PBC write\n");
+		kinfo->spi_runtime_flags |= IPATH_RUNTIME_PBC_REWRITE;
+		ipath_cdbg(PROC, "IBA6120 Rev2, allow loose DMA alignment\n");
+		kinfo->spi_runtime_flags |= IPATH_RUNTIME_LOOSE_DMA_ALIGN;
+	}
+
+done:
+	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
 	return 0;
 }
 
@@ -1244,7 +1342,10 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
 	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
 	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
 	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
-	dd->ipath_f_put_tid = ipath_pe_put_tid;
+	if (dd->ipath_minrev >= 2)
+		dd->ipath_f_put_tid = ipath_pe_put_tid_2;
+	else
+		dd->ipath_f_put_tid = ipath_pe_put_tid;
 	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
 	dd->ipath_f_setextled = ipath_setup_pe_setextled;
 	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
@@ -1259,6 +1360,6 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
 	dd->ipath_kregs = &ipath_pe_kregs;
 	dd->ipath_cregs = &ipath_pe_cregs;
 
-	ipath_init_pe_variables();
+	ipath_init_pe_variables(dd);
 }
 