@@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev)
 }
 
 /* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+int gfar_clean_tx_ring(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
 	struct txbd8 *bdp;
+	struct gfar_private *priv = netdev_priv(dev);
+	int howmany = 0;
 
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
-	/* Lock priv */
-	spin_lock(&priv->txlock);
 	bdp = priv->dirty_tx;
 	while ((bdp->status & TXBD_READY) == 0) {
 		/* If dirty_tx and cur_tx are the same, then either the */
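The hunk above turns the body of the TX completion handler into a standalone gfar_clean_tx_ring() and drops the IEVENT ack and the locking, both of which move out to the callers. The new function assumes priv->txlock is already held. A minimal sketch of the resulting call contract (the matching unlock in gfar_transmit() sits in unchanged code outside these hunks, so its exact placement is assumed):

	spin_lock(&priv->txlock);	/* callers serialize ring access */
	gfar_clean_tx_ring(dev);	/* walk dirty_tx, free sent skbs */
	spin_unlock(&priv->txlock);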
@@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
 			break;
 
-		dev->stats.tx_packets++;
+		howmany++;
 
 		/* Deferred means some collisions occurred during transmit, */
 		/* but we eventually sent the packet. */
@@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 
 		/* Free the sk buffer associated with this TxBD */
 		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+
 		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 		priv->skb_dirtytx =
 		    (priv->skb_dirtytx +
 		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);
 
+		/* Clean BD length for empty detection */
+		bdp->length = 0;
+
 		/* update bdp to point at next bd in the ring (wrapping if necessary) */
 		if (bdp->status & TXBD_WRAP)
 			bdp = priv->tx_bd_base;
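Zeroing bdp->length on reclaim gives the driver a cheap way to tell an empty ring from a full one: cur_tx == dirty_tx alone is ambiguous, but a reclaimed BD now carries length 0. A hypothetical helper to illustrate the idea (tx_ring_empty() does not appear in this patch):

	/* Hypothetical: distinguish "all BDs reclaimed" from "ring full" */
	static int tx_ring_empty(struct gfar_private *priv)
	{
		/* pointers meet and the BD has been cleaned: nothing pending */
		return priv->cur_tx == priv->dirty_tx &&
		       priv->cur_tx->length == 0;
	}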
@@ -1297,6 +1296,25 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
 			netif_wake_queue(dev);
 	} /* while ((bdp->status & TXBD_READY) == 0) */
 
+	dev->stats.tx_packets += howmany;
+
+	return howmany;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* Clear IEVENT */
+	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
+
+	/* Lock priv */
+	spin_lock(&priv->txlock);
+
+	gfar_clean_tx_ring(dev);
+
 	/* If we are coalescing the interrupts, reset the timer */
 	/* Otherwise, clear it */
 	if (likely(priv->txcoalescing)) {
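This hunk completes the split: the per-packet dev->stats.tx_packets++ dropped in the second hunk becomes one batched add, the reclaim count is returned, and gfar_transmit() is rebuilt as a thin wrapper that acks IEVENT, takes the lock, and delegates. The same reclaim routine now serves two paths (sketch, not patch text):

	/*
	 * hard IRQ:  gfar_transmit()
	 *              spin_lock(&priv->txlock);
	 *              gfar_clean_tx_ring(dev);
	 *
	 * NAPI:      gfar_poll()          (final hunk below)
	 *              spin_trylock_irqsave(&priv->txlock, flags);
	 *              gfar_clean_tx_ring(dev);
	 */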
@@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
 	unsigned long flags;
 #endif
 
-	/* Clear IEVENT, so rx interrupt isn't called again
-	 * because of this interrupt */
-	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
 	/* support NAPI */
 #ifdef CONFIG_GFAR_NAPI
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived */
+	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
 	if (netif_rx_schedule_prep(dev, &priv->napi)) {
 		tempval = gfar_read(&priv->regs->imask);
-		tempval &= IMASK_RX_DISABLED;
+		tempval &= IMASK_RTX_DISABLED;
 		gfar_write(&priv->regs->imask, tempval);
 
 		__netif_rx_schedule(dev, &priv->napi);
@@ -1411,6 +1429,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
 			gfar_read(&priv->regs->imask));
 	}
 #else
+	/* Clear IEVENT, so rx interrupt isn't called again
+	 * because of this interrupt */
+	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
 
 	spin_lock_irqsave(&priv->rxlock, flags);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
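These two gfar_receive() hunks move the IEVENT ack into the two build variants: with CONFIG_GFAR_NAPI, both RX and TX events are acked and masked before the poll is scheduled, so the softirq can reclaim TX descriptors too; without NAPI, the original RX-only ack is kept. IEVENT_RTX_MASK and IMASK_RTX_DISABLED are presumably the combined RX+TX counterparts of the per-direction masks in gianfar.h, along the lines of (assumed, not shown in this patch):

	#define IEVENT_RTX_MASK	(IEVENT_RX_MASK | IEVENT_TX_MASK)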
@@ -1580,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
 	struct net_device *dev = priv->dev;
 	int howmany;
+	unsigned long flags;
+
+	/* If we fail to get the lock, don't bother with the TX BDs */
+	if (spin_trylock_irqsave(&priv->txlock, flags)) {
+		gfar_clean_tx_ring(dev);
+		spin_unlock_irqrestore(&priv->txlock, flags);
+	}
 
 	howmany = gfar_clean_rx_ring(dev, budget);
 
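In gfar_poll() the TX lock is taken with spin_trylock_irqsave() rather than unconditionally: the poll runs in softirq context while gfar_transmit() takes txlock from hard-IRQ context, so IRQs must stay disabled while it is held, and if the lock is contended the interrupt path is already cleaning the ring, making it safe to skip. TX reclaim is also kept outside the NAPI budget; only gfar_clean_rx_ring() consumes it. The reasoning as a sketch:

	/*
	 * Why trylock + _irqsave:
	 *   CPU0 softirq:  gfar_poll() holds txlock with IRQs enabled
	 *   CPU0 hard IRQ: gfar_transmit() -> spin_lock(&priv->txlock)
	 *                  => self-deadlock; hence the _irqsave variant.
	 *   Contended lock: another context is already reclaiming the
	 *                  ring, so skipping this pass loses nothing.
	 */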