@@ -2621,6 +2621,7 @@ static int skge_down(struct net_device *dev)
 
 static inline int skge_avail(const struct skge_ring *ring)
 {
+	smp_mb();
 	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
 		+ (ring->to_clean - ring->to_use) - 1;
 }
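
Not part of the patch, for context: skge_avail() derives the number of free
descriptors from the two ring pointers, always keeping one slot unused so a
full ring stays distinguishable from an empty one. The new smp_mb() orders
the caller's earlier ring-pointer update against the pointer reads below;
that pairing is my reading of the intent, spelled out after the last hunk.
The arithmetic itself can be checked with this standalone sketch, using
array indices in place of the driver's element pointers (illustrative
names, not driver code):

#include <stdio.h>

/* Same arithmetic as skge_avail(), with array indices standing in for
 * the driver's element pointers. One slot always stays unused, so
 * to_clean == to_use means "empty", never "full". */
static int ring_avail(int to_clean, int to_use, int count)
{
	return ((to_clean > to_use) ? 0 : count) + (to_clean - to_use) - 1;
}

int main(void)
{
	/* Producer ahead of cleaner: 512 + (4 - 10) - 1 = 505 free. */
	printf("%d\n", ring_avail(4, 10, 512));
	/* Producer wrapped past the end: 0 + (10 - 4) - 1 = 5 free. */
	printf("%d\n", ring_avail(10, 4, 512));
	return 0;
}
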
@@ -2709,6 +2710,8 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		       dev->name, e - skge->tx_ring.start, skb->len);
 
 	skge->tx_ring.to_use = e->next;
+	smp_wmb();
+
 	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
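
Not part of the patch, my reading of this hunk: with netif_tx_lock() about
to be dropped from the completion path (last hunk), the smp_wmb() guarantees
that the store to to_use is visible before the queue is marked stopped, so a
skge_tx_done() that observes the stopped queue also observes the ring state
that caused the stop and can wake it. The full pairing is easiest to follow
in the runnable sketch after the final hunk.
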
@@ -2726,8 +2729,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 {
 	struct pci_dev *pdev = skge->hw->pdev;
 
-	BUG_ON(!e->skb);
-
 	/* skb header vs. fragment */
 	if (control & BMU_STF)
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
@@ -2745,7 +2746,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 
 		dev_kfree_skb(e->skb);
 	}
-	e->skb = NULL;
 }
 
 /* Free all buffers in transmit ring */
@@ -3017,21 +3017,29 @@ static void skge_tx_done(struct net_device *dev)
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	netif_tx_lock(dev);
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
-		struct skge_tx_desc *td = e->desc;
+		u32 control = ((const struct skge_tx_desc *) e->desc)->control;
 
-		if (td->control & BMU_OWN)
+		if (control & BMU_OWN)
 			break;
 
-		skge_tx_free(skge, e, td->control);
+		skge_tx_free(skge, e, control);
 	}
 	skge->tx_ring.to_clean = e;
 
-	if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
-		netif_wake_queue(dev);
+	/* Can run lockless until we need to synchronize to restart queue. */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(dev) &&
+		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+		netif_tx_lock(dev);
+		if (unlikely(netif_queue_stopped(dev) &&
+			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+			netif_wake_queue(dev);
+		}
+		netif_tx_unlock(dev);
+	}
 
-	netif_tx_unlock(dev);
 }
 
 static int skge_poll(struct net_device *dev, int *budget)
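
Not part of the patch, for context: the three barriers form a single
handshake against lost wakeups. The transmitter publishes to_use, and
smp_wmb() plus the smp_mb() inside skge_avail() order that publish against
its low-water check; the cleaner publishes to_clean, and its smp_mb() orders
that against the netif_queue_stopped() test, which is then repeated under
netif_tx_lock() so a racing stop cannot slip in between test and wake. A
runnable userspace analogue in C11 with illustrative names and sizes (the
single-threaded main() only walks the bookkeeping):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8	/* ring entries; one always stays unused */
#define LOW_WATER 2	/* stop/wake threshold, like TX_LOW_WATER */

static atomic_int to_use, to_clean;	/* producer and cleaner indices */
static atomic_bool stopped;		/* netif_queue_stopped() stand-in */
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static int ring_avail(void)
{
	atomic_thread_fence(memory_order_seq_cst);  /* smp_mb() in skge_avail() */
	int clean = atomic_load(&to_clean), use = atomic_load(&to_use);
	return ((clean > use) ? 0 : RING_SIZE) + (clean - use) - 1;
}

static void xmit_one(void)	/* producer side, as in skge_xmit_frame() */
{
	/* Single producer assumed, as in the driver's xmit path. */
	atomic_store(&to_use, (atomic_load(&to_use) + 1) % RING_SIZE);
	atomic_thread_fence(memory_order_release);  /* the smp_wmb() */
	if (ring_avail() <= LOW_WATER)
		atomic_store(&stopped, true);	    /* netif_stop_queue() */
}

static void clean_one(void)	/* completion side, as in skge_tx_done() */
{
	atomic_store(&to_clean, (atomic_load(&to_clean) + 1) % RING_SIZE);
	atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() */
	if (atomic_load(&stopped) && ring_avail() > LOW_WATER) {
		pthread_mutex_lock(&tx_lock);	    /* netif_tx_lock() */
		/* Re-check under the lock: a racing xmit may have
		 * refilled the ring and stopped the queue again. */
		if (atomic_load(&stopped) && ring_avail() > LOW_WATER)
			atomic_store(&stopped, false);	/* netif_wake_queue() */
		pthread_mutex_unlock(&tx_lock);
	}
}

int main(void)
{
	for (int i = 0; i < RING_SIZE - 1; i++)
		xmit_one();
	printf("stopped=%d avail=%d\n", (int)atomic_load(&stopped), ring_avail());
	for (int i = 0; i < RING_SIZE - 1; i++)
		clean_one();
	printf("stopped=%d avail=%d\n", (int)atomic_load(&stopped), ring_avail());
	return 0;	/* prints: stopped=1 avail=0, then stopped=0 avail=7 */
}

Dropping either fence reopens the window in which the producer stops a
queue that the cleaner has already decided not to wake.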