@@ -2630,14 +2630,15 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
 		skb = tx_buf->skb;
 
+		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
+		prefetch(&skb->end);
+
 		/* partial BD completions possible with TSO packets */
-		if (skb_is_gso(skb)) {
+		if (tx_buf->is_gso) {
 			u16 last_idx, last_ring_idx;
 
-			last_idx = sw_cons +
-				skb_shinfo(skb)->nr_frags + 1;
-			last_ring_idx = sw_ring_cons +
-				skb_shinfo(skb)->nr_frags + 1;
+			last_idx = sw_cons + tx_buf->nr_frags + 1;
+			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
 			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
 				last_idx++;
 			}
@@ -2649,7 +2650,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
 
 		tx_buf->skb = NULL;
-		last = skb_shinfo(skb)->nr_frags;
+		last = tx_buf->nr_frags;
 
 		for (i = 0; i < last; i++) {
 			sw_cons = NEXT_TX_BD(sw_cons);
@@ -2662,7 +2663,8 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		if (tx_pkt == budget)
 			break;
 
-		hw_cons = bnx2_get_hw_tx_cons(bnapi);
+		if (hw_cons == sw_cons)
+			hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}
 
 	txr->hw_tx_cons = hw_cons;
@@ -6179,6 +6181,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
 
 	last_frag = skb_shinfo(skb)->nr_frags;
+	tx_buf->nr_frags = last_frag;
+	tx_buf->is_gso = skb_is_gso(skb);
 
 	for (i = 0; i < last_frag; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
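
These hunks read tx_buf->nr_frags and tx_buf->is_gso, so struct sw_tx_bd in bnx2.h must also gain fields that cache this skb metadata at transmit time. A minimal sketch of the assumed layout (the exact field names follow the .c hunks above, but the types are an assumption, not taken from this excerpt):

struct sw_tx_bd {
	struct sk_buff		*skb;
	/* Assumed new fields: filled in by bnx2_start_xmit() so that
	 * bnx2_tx_int() need not dereference skb_shinfo(skb), whose
	 * cache line is typically cold by TX-completion time.
	 */
	unsigned short		nr_frags;
	unsigned short		is_gso;
};

Two smaller optimizations ride along: prefetch(&skb->end) pulls in the sk_buff fields needed to resolve skb_shinfo(skb) before skb_dma_unmap() uses it, and hw_cons is re-read from the status block only once the loop has consumed everything seen so far (hw_cons == sw_cons), rather than on every iteration.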