|
@@ -2508,15 +2508,19 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
 	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
 	int err = 0;
 
-	dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
-		np->dev->name, rp->rx_channel, (unsigned long long) stat);
-
-	niu_log_rxchan_errors(np, rp, stat);
 
 	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
 		    RX_DMA_CTL_STAT_PORT_FATAL))
 		err = -EINVAL;
 
+	if (err) {
+		dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
+			np->dev->name, rp->rx_channel,
+			(unsigned long long) stat);
+
+		niu_log_rxchan_errors(np, rp, stat);
+	}
+
 	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
 	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
 
@@ -2749,13 +2753,16 @@ static int niu_device_error(struct niu *np)
 	return -ENODEV;
 }
 
-static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp)
+static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
+				  u64 v0, u64 v1, u64 v2)
 {
-	u64 v0 = lp->v0;
-	u64 v1 = lp->v1;
-	u64 v2 = lp->v2;
+
 	int i, err = 0;
 
+	lp->v0 = v0;
+	lp->v1 = v1;
+	lp->v2 = v2;
+
 	if (v1 & 0x00000000ffffffffULL) {
 		u32 rx_vec = (v1 & 0xffffffff);
 
@@ -2764,8 +2771,13 @@ static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp)
 
 			if (rx_vec & (1 << rp->rx_channel)) {
 				int r = niu_rx_error(np, rp);
-				if (r)
+				if (r) {
 					err = r;
+				} else {
+					if (!v0)
+						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+						     RX_DMA_CTL_STAT_MEX);
+				}
 			}
 		}
 	}
@@ -2803,7 +2815,7 @@ static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp)
 	if (err)
 		niu_enable_interrupts(np, 0);
 
-	return -EINVAL;
+	return err;
 }
 
 static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
@@ -2905,7 +2917,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
 	}
 
 	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
-		int err = niu_slowpath_interrupt(np, lp);
+		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
 		if (err)
 			goto out;
 	}