@@ -4357,7 +4357,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	txq = netdev_get_tx_queue(tp->dev, index);
 
 	while (sw_idx != hw_idx) {
-		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
 		int i, tx_bug = 0;
 
@@ -4366,7 +4366,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			return;
 		}
 
-		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+		pci_unmap_single(tp->pdev,
+				 pci_unmap_addr(ri, mapping),
+				 skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
 
 		ri->skb = NULL;
 
@@ -4376,6 +4379,11 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			ri = &tnapi->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
+
+			pci_unmap_page(tp->pdev,
+				       pci_unmap_addr(ri, mapping),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
@@ -5334,17 +5342,21 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	} else {
 		/* New SKB is guaranteed to be linear. */
 		entry = *start;
-		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
-		new_addr = skb_shinfo(new_skb)->dma_head;
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure the mapping succeeded */
+		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+			ret = -1;
+			dev_kfree_skb(new_skb);
+			new_skb = NULL;
 
 		/* Make sure new skb does not cross any 4G boundaries.
 		 * Drop the packet if it does.
 		 */
-		if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
-			    tg3_4g_overflow_test(new_addr, new_skb->len))) {
-			if (!ret)
-				skb_dma_unmap(&tp->pdev->dev, new_skb,
-					      DMA_TO_DEVICE);
+		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
+					 PCI_DMA_TODEVICE);
 			ret = -1;
 			dev_kfree_skb(new_skb);
 			new_skb = NULL;
@@ -5358,15 +5370,28 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
+		int len;
+
 		if (i == 0)
-			tnapi->tx_buffers[entry].skb = new_skb;
+			len = skb_headlen(skb);
 		else
+			len = skb_shinfo(skb)->frags[i-1].size;
+
+		pci_unmap_single(tp->pdev,
+				 pci_unmap_addr(&tnapi->tx_buffers[entry],
+						mapping),
+				 len, PCI_DMA_TODEVICE);
+		if (i == 0) {
+			tnapi->tx_buffers[entry].skb = new_skb;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   new_addr);
+		} else {
 			tnapi->tx_buffers[entry].skb = NULL;
+		}
 		entry = NEXT_TX(entry);
 		i++;
 	}
 
-	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	return ret;
@@ -5403,10 +5428,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
-	struct skb_shared_info *sp;
 	dma_addr_t mapping;
 	struct tg3_napi *tnapi;
 	struct netdev_queue *txq;
+	unsigned int i, last;
+
 
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -5477,18 +5503,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 			       (vlan_tx_tag_get(skb) << 16));
 #endif
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	len = skb_headlen(skb);
+
+	/* Queue skb data, a.k.a. the main skb fragment. */
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
 	}
 
-	sp = skb_shinfo(skb);
-
-	mapping = sp->dma_head;
-
 	tnapi->tx_buffers[entry].skb = skb;
-
-	len = skb_headlen(skb);
+	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
 	    !mss && skb->len > ETH_DATA_LEN)
@@ -5501,15 +5526,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
-		unsigned int i, last;
-
 		last = skb_shinfo(skb)->nr_frags - 1;
 		for (i = 0; i <= last; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = sp->dma_maps[i];
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
+
 			tnapi->tx_buffers[entry].skb = NULL;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
 
 			tg3_set_txd(tnapi, entry, mapping, len,
 				    base_flags, (i == last) | (mss << 1));
@@ -5532,6 +5563,27 @@ out_unlock:
 	mmiowb();
 
 	return NETDEV_TX_OK;
+
+dma_error:
+	last = i;
+	entry = tnapi->tx_prod;
+	tnapi->tx_buffers[entry].skb = NULL;
+	pci_unmap_single(tp->pdev,
+			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+	for (i = 0; i <= last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		entry = NEXT_TX(entry);
+
+		pci_unmap_page(tp->pdev,
+			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+					      mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
@@ -5579,11 +5631,12 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
-	struct skb_shared_info *sp;
 	int would_hit_hwbug;
 	dma_addr_t mapping;
 	struct tg3_napi *tnapi;
 	struct netdev_queue *txq;
+	unsigned int i, last;
+
 
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -5678,21 +5731,19 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 	    !mss && skb->len > ETH_DATA_LEN)
 		base_flags |= TXD_FLAG_JMB_PKT;
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	len = skb_headlen(skb);
+
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
 		dev_kfree_skb(skb);
 		goto out_unlock;
 	}
 
-	sp = skb_shinfo(skb);
-
-	mapping = sp->dma_head;
-
 	tnapi->tx_buffers[entry].skb = skb;
+	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	would_hit_hwbug = 0;
 
-	len = skb_headlen(skb);
-
 	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
 		would_hit_hwbug = 1;
 
@@ -5714,16 +5765,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
-		unsigned int i, last;
-
 		last = skb_shinfo(skb)->nr_frags - 1;
 		for (i = 0; i <= last; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			len = frag->size;
-			mapping = sp->dma_maps[i];
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
 
 			tnapi->tx_buffers[entry].skb = NULL;
+			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
 
 			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
 			    len <= 8)
@@ -5779,6 +5835,27 @@ out_unlock:
 	mmiowb();
 
 	return NETDEV_TX_OK;
+
+dma_error:
+	last = i;
+	entry = tnapi->tx_prod;
+	tnapi->tx_buffers[entry].skb = NULL;
+	pci_unmap_single(tp->pdev,
+			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+	for (i = 0; i <= last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		entry = NEXT_TX(entry);
+
+		pci_unmap_page(tp->pdev,
+			       pci_unmap_addr(&tnapi->tx_buffers[entry],
+					      mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
@@ -6046,8 +6123,9 @@ static void tg3_free_rings(struct tg3 *tp)
 			continue;
 
 		for (i = 0; i < TG3_TX_RING_SIZE; ) {
-			struct tx_ring_info *txp;
+			struct ring_info *txp;
 			struct sk_buff *skb;
+			unsigned int k;
 
 			txp = &tnapi->tx_buffers[i];
 			skb = txp->skb;
@@ -6057,11 +6135,22 @@ static void tg3_free_rings(struct tg3 *tp)
 				continue;
 			}
 
-			skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
-
+			pci_unmap_single(tp->pdev,
+					 pci_unmap_addr(txp, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
 			txp->skb = NULL;
 
-			i += skb_shinfo(skb)->nr_frags + 1;
+			i++;
+
+			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
+				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
+				pci_unmap_page(tp->pdev,
+					       pci_unmap_addr(txp, mapping),
+					       skb_shinfo(skb)->frags[k].size,
+					       PCI_DMA_TODEVICE);
+				i++;
+			}
 
 			dev_kfree_skb_any(skb);
 		}
@@ -6231,7 +6320,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 
 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-		tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+		tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
 					    TG3_TX_RING_SIZE, GFP_KERNEL);
 		if (!tnapi->tx_buffers)
 			goto err_out;
@@ -10637,7 +10726,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	for (i = 14; i < tx_len; i++)
 		tx_data[i] = (u8) (i & 0xff);
 
-	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, map)) {
 		dev_kfree_skb(skb);
 		return -EIO;
 	}
@@ -10651,8 +10741,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	num_pkts = 0;
 
-	tg3_set_txd(tnapi, tnapi->tx_prod,
-		    skb_shinfo(skb)->dma_head, tx_len, 0, 1);
+	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
 
 	tnapi->tx_prod++;
 	num_pkts++;
@@ -10676,7 +10765,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 		break;
 	}
 
-	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
 	if (tx_idx != tnapi->tx_prod)