@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) &&
-		    !netif_tx_queue_frozen(txq)) {
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 			ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
		ret = 0;
 
 	return ret;
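
Side note: the replacement helper folds the two separate per-queue state tests into a single call. A minimal sketch of the behaviour it is assumed to provide is below; this is an illustration only, not the actual include/linux/netdevice.h definition, which presumably tests both queue-state bits against txq->state with one mask.

/* Assumed-equivalent sketch: true if the tx queue is stopped or frozen. */
static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *txq)
{
	return netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq);
}

If the real helper checks both bits with a single mask, it also avoids evaluating two separate predicates on the hot transmit path, which is presumably the motivation for the consolidation.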