@@ -478,7 +478,7 @@ rx_status_loop:
 
 	while (1) {
 		u32 status, len;
-		dma_addr_t mapping;
+		dma_addr_t mapping, new_mapping;
 		struct sk_buff *skb, *new_skb;
 		struct cp_desc *desc;
 		const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,14 @@ rx_status_loop:
 			goto rx_next;
 		}
 
+		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+					     PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+			dev->stats.rx_dropped++;
+			kfree_skb(new_skb);
+			goto rx_next;
+		}
+
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
@@ -531,12 +539,11 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
-					 PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
+		mapping = new_mapping;
 
 rx_next:
 		cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +723,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+				   int first, int entry_last)
+{
+	int frag, index;
+	struct cp_desc *txd;
+	skb_frag_t *this_frag;
+	for (frag = 0; frag+first < entry_last; frag++) {
+		index = first+frag;
+		cp->tx_skb[index] = NULL;
+		txd = &cp->tx_ring[index];
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+	}
+}
+
 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -749,6 +772,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 
 		len = skb->len;
 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping))
+			goto out_dma_error;
+
 		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -786,6 +812,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+			goto out_dma_error;
+
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
@@ -799,6 +828,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
 						 len, PCI_DMA_TODEVICE);
+			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+				goto out_dma_error;
+			}
+
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
 			ctrl = eor | len | DescOwn;
@@ -859,11 +893,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
+out_unlock:
 	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
 
 	return NETDEV_TX_OK;
+out_dma_error:
+	kfree_skb(skb);
+	cp->dev->stats.tx_dropped++;
+	goto out_unlock;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1093,10 @@ static int cp_refill_rx(struct cp_private *cp)
 
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+			kfree_skb(skb);
+			goto err_out;
+		}
 		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
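
Note on the pattern used at every map site above: an address returned by dma_map_single() may only be used once dma_mapping_error() has cleared it, since a streaming mapping can fail (for example, when an IOMMU or the SWIOTLB bounce pool is exhausted), and a failed handle must be neither handed to hardware nor unmapped. Below is a minimal stand-alone sketch of that check, assuming a generic struct device and the generic DMA_FROM_DEVICE direction in place of this driver's PCI_DMA_* constants; the helper name and the -ENOMEM policy are illustrative, not part of the patch.

	#include <linux/dma-mapping.h>

	/* Map a buffer for device access and validate the handle before use.
	 * Returns 0 on success or -ENOMEM if the mapping could not be created. */
	static int example_map_rx_buf(struct device *dev, void *buf, size_t len,
				      dma_addr_t *handle)
	{
		dma_addr_t m = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

		/* A failed handle must not be used by the device or unmapped. */
		if (dma_mapping_error(dev, m))
			return -ENOMEM;

		*handle = m;
		return 0;
	}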