@@ -2303,21 +2303,20 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
-	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	struct skge_tx_desc *td;
	int i;
	u32 control, len;
	u64 map;
+	unsigned long flags;

	skb = skb_padto(skb, ETH_ZLEN);
	if (!skb)
		return NETDEV_TX_OK;

-	if (!spin_trylock(&skge->tx_lock)) {
+	if (!spin_trylock_irqsave(&skge->tx_lock, flags))
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
-	}

	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
		if (!netif_queue_stopped(dev)) {
@@ -2326,12 +2325,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
			       dev->name);
		}
-		spin_unlock(&skge->tx_lock);
+		spin_unlock_irqrestore(&skge->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

-	e = ring->to_use;
+	e = skge->tx_ring.to_use;
	td = e->desc;
+	BUG_ON(td->control & BMU_OWN);
	e->skb = skb;
	len = skb_headlen(skb);
	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -2372,8 +2372,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
				       frag->size, PCI_DMA_TODEVICE);

		e = e->next;
-		e->skb = NULL;
+		e->skb = skb;
		tf = e->desc;
+		BUG_ON(tf->control & BMU_OWN);
+
		tf->dma_lo = map;
		tf->dma_hi = (u64) map >> 32;
		pci_unmap_addr_set(e, mapaddr, map);
@@ -2390,56 +2392,68 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

-	if (netif_msg_tx_queued(skge))
+	if (unlikely(netif_msg_tx_queued(skge)))
		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
-		       dev->name, e - ring->start, skb->len);
+		       dev->name, e - skge->tx_ring.start, skb->len);

-	ring->to_use = e->next;
+	skge->tx_ring.to_use = e->next;
	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
	}

-	mmiowb();
-	spin_unlock(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
 }

-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
+
+/* Free resources associated with this ring element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+			 u32 control)
 {
	struct pci_dev *pdev = skge->hw->pdev;
-	struct skge_element *e;

-	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
-		struct sk_buff *skb = e->skb;
-		int i;
+	BUG_ON(!e->skb);

-		e->skb = NULL;
+	/* skb header vs. fragment */
+	if (control & BMU_STF)
		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+				 pci_unmap_len(e, maplen),
+				 PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+			       pci_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);

-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			e = e->next;
-			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
-				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-		}
+	if (control & BMU_EOF) {
+		if (unlikely(netif_msg_tx_done(skge)))
+			printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+			       skge->netdev->name, e - skge->tx_ring.start);

-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(e->skb);
	}
-	skge->tx_ring.to_clean = e;
+	e->skb = NULL;
 }

+/* Free all buffers in transmit ring */
 static void skge_tx_clean(struct skge_port *skge)
 {
+	struct skge_element *e;
+	unsigned long flags;

-	spin_lock_bh(&skge->tx_lock);
-	skge_tx_complete(skge, skge->tx_ring.to_use);
+	spin_lock_irqsave(&skge->tx_lock, flags);
+	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+		struct skge_tx_desc *td = e->desc;
+		skge_tx_free(skge, e, td->control);
+		td->control = 0;
+	}
+
+	skge->tx_ring.to_clean = e;
	netif_wake_queue(skge->netdev);
-	spin_unlock_bh(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 }

 static void skge_tx_timeout(struct net_device *dev)
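
For context on the hunks above: skge_xmit_frame now takes skge->tx_lock with the IRQ-safe trylock variant, because the same lock is also acquired from hard-IRQ context by the transmit-completion handler introduced later in this patch. A minimal sketch of that hard_start_xmit locking pattern follows; the my_* names are hypothetical and only the locking and return-value skeleton mirrors the hunk above, this is not the driver's code.

/* Sketch only -- hypothetical my_* names, not part of the skge driver. */
static int my_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);
	unsigned long flags;

	/* IRQ-safe trylock: the tx lock is also taken in the tx-done IRQ path */
	if (!spin_trylock_irqsave(&p->tx_lock, flags))
		return NETDEV_TX_LOCKED;	/* core will requeue the skb */

	if (my_tx_ring_full(p)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&p->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	my_hw_queue_frame(p, skb);		/* post descriptors, kick DMA */

	spin_unlock_irqrestore(&p->tx_lock, flags);
	return NETDEV_TX_OK;
}
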
@@ -2665,32 +2679,28 @@ resubmit:
	return NULL;
 }

-static void skge_tx_done(struct skge_port *skge)
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_txirq(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e, *last;
+	struct skge_element *e;
+
+	rmb();

	spin_lock(&skge->tx_lock);
-	last = ring->to_clean;
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;

		if (td->control & BMU_OWN)
			break;

-		if (td->control & BMU_EOF) {
-			last = e->next;
-			if (unlikely(netif_msg_tx_done(skge)))
-				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
-				       skge->netdev->name, e - ring->start);
-		}
+		skge_tx_free(skge, e, td->control);
	}
+	skge->tx_ring.to_clean = e;

-	skge_tx_complete(skge, last);
-
-	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
-	if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+	if (netif_queue_stopped(skge->netdev)
+	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
		netif_wake_queue(skge->netdev);

	spin_unlock(&skge->tx_lock);
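
The new skge_txirq above walks the transmit ring from to_clean, stops at the first descriptor the hardware still owns (BMU_OWN set), frees everything before it, and wakes the queue only if it was stopped and enough slots are free again. A generic sketch of that completion loop, with hypothetical my_* names (struct my_priv, my_free_element, MY_OWN_BIT and so on are illustration only), follows.

/* Sketch only -- hypothetical my_* names, not part of the skge driver. */
static void my_tx_complete(struct my_priv *p)
{
	struct my_elem *e;

	rmb();				/* see descriptor updates written by the device */

	spin_lock(&p->tx_lock);
	for (e = p->tx_ring.to_clean; e != p->tx_ring.to_use; e = e->next) {
		if (e->desc->control & MY_OWN_BIT)
			break;		/* hardware still owns this slot */
		my_free_element(p, e);	/* unmap DMA, free skb on end-of-frame */
	}
	p->tx_ring.to_clean = e;

	if (netif_queue_stopped(p->netdev) && my_tx_avail(p) > MY_TX_LOW_WATER)
		netif_wake_queue(p->netdev);
	spin_unlock(&p->tx_lock);
}
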
@@ -2705,8 +2715,6 @@ static int skge_poll(struct net_device *dev, int *budget)
	int to_do = min(dev->quota, *budget);
	int work_done = 0;

-	skge_tx_done(skge);
-
	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb;
@@ -2738,10 +2746,12 @@ static int skge_poll(struct net_device *dev, int *budget)
		return 1; /* not done */

	netif_rx_complete(dev);
-	mmiowb();

-	hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= rxirqmask[skge->port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	mmiowb();
+	spin_unlock_irq(&hw->hw_lock);

	return 0;
 }
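
In the skge_poll hunk above, the port's receive interrupt source is re-armed only after NAPI polling completes, and the cached interrupt mask is now updated under hw->hw_lock because the interrupt handler modifies the same mask. A rough sketch of that completion step, with hypothetical my_* names (my_hw, my_rx_irq_bit, my_write32, MY_IMASK_REG are illustration only), follows.

/* Sketch only -- hypothetical my_* names, not part of the skge driver. */
static void my_napi_done(struct net_device *dev, struct my_hw *hw, int port)
{
	netif_rx_complete(dev);			/* leave polling mode */

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= my_rx_irq_bit[port];	/* re-arm this port's rx source */
	my_write32(hw, MY_IMASK_REG, hw->intr_mask);
	mmiowb();				/* order the MMIO write before unlock */
	spin_unlock_irq(&hw->hw_lock);
}
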
@@ -2871,8 +2881,10 @@ static void skge_extirq(void *arg)
	}
	mutex_unlock(&hw->phy_mutex);

+	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 }

 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2885,54 +2897,68 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
	if (status == 0)
		return IRQ_NONE;

+	spin_lock(&hw->hw_lock);
+	status &= hw->intr_mask;
	if (status & IS_EXT_REG) {
		hw->intr_mask &= ~IS_EXT_REG;
		schedule_work(&hw->phy_work);
	}

-	if (status & (IS_R1_F|IS_XA1_F)) {
-		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
-		netif_rx_schedule(hw->dev[0]);
+	if (status & IS_XA1_F) {
+		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
+		skge_txirq(hw->dev[0]);
	}

-	if (status & (IS_R2_F|IS_XA2_F)) {
-		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
-		netif_rx_schedule(hw->dev[1]);
+	if (status & IS_R1_F) {
+		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
+		hw->intr_mask &= ~IS_R1_F;
+		netif_rx_schedule(hw->dev[0]);
	}

-	if (likely((status & hw->intr_mask) == 0))
-		return IRQ_HANDLED;
+	if (status & IS_PA_TO_TX1)
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_RX1) {
		struct skge_port *skge = netdev_priv(hw->dev[0]);
-		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
-	}

-	if (status & IS_PA_TO_RX2) {
-		struct skge_port *skge = netdev_priv(hw->dev[1]);
		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}

-	if (status & IS_PA_TO_TX1)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
-
-	if (status & IS_PA_TO_TX2)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

-	if (status & IS_MAC2)
-		skge_mac_intr(hw, 1);
+	if (hw->dev[1]) {
+		if (status & IS_XA2_F) {
+			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
+			skge_txirq(hw->dev[1]);
+		}
+
+		if (status & IS_R2_F) {
+			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
+			hw->intr_mask &= ~IS_R2_F;
+			netif_rx_schedule(hw->dev[1]);
+		}
+
+		if (status & IS_PA_TO_RX2) {
+			struct skge_port *skge = netdev_priv(hw->dev[1]);
+			++skge->net_stats.rx_over_errors;
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		}
+
+		if (status & IS_PA_TO_TX2)
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+		if (status & IS_MAC2)
+			skge_mac_intr(hw, 1);
+	}

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock(&hw->hw_lock);

	return IRQ_HANDLED;
 }
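
The reworked skge_intr above serializes all interrupt-mask updates with hw->hw_lock, masks the status word against the currently enabled sources, services transmit completions directly via skge_txirq, and defers receive work to NAPI by clearing the rx source until skge_poll re-enables it; the second-port cases are only touched when hw->dev[1] exists. A condensed skeleton of that interrupt-handling pattern, with hypothetical my_* names (my_hw, my_read32, MY_ISRC_REG, MY_TX_DONE and so on are illustration only), follows.

/* Sketch only -- hypothetical my_* names, not part of the skge driver. */
static irqreturn_t my_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_hw *hw = dev_id;
	u32 status = my_read32(hw, MY_ISRC_REG);

	if (status == 0)
		return IRQ_NONE;		/* not ours (shared interrupt line) */

	spin_lock(&hw->hw_lock);
	status &= hw->intr_mask;		/* ignore sources we have masked */

	if (status & MY_TX_DONE)
		my_tx_complete_irq(hw->dev[0]);	/* free completed tx buffers now */

	if (status & MY_RX_DONE) {
		hw->intr_mask &= ~MY_RX_DONE;	/* NAPI poll will re-enable it */
		netif_rx_schedule(hw->dev[0]);
	}

	my_write32(hw, MY_IMASK_REG, hw->intr_mask);
	spin_unlock(&hw->hw_lock);
	return IRQ_HANDLED;
}
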
@@ -3083,6 +3109,7 @@ static int skge_reset(struct skge_hw *hw)
	else
		hw->ram_size = t8 * 4096;

+	spin_lock_init(&hw->hw_lock);
	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
	if (hw->ports > 1)
		hw->intr_mask |= IS_PORT_2;
@@ -3389,7 +3416,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask = 0;
	skge_write32(hw, B0_IMSK, 0);
+	spin_unlock_irq(&hw->hw_lock);
+
	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);