@@ -61,8 +61,12 @@
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_BUFFS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	u32 opts[2];
 	int frags;
 
-	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
@@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	mmiowb();
 
-	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	 * can't.
 	 */
 	smp_mb();
-	if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+	if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
 		netif_wake_queue(dev);
 }
 
@@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 	 */
 	smp_mb();
 	if (netif_queue_stopped(dev) &&
-	    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+	    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		netif_wake_queue(dev);
 	}
 	/*