@@ -2040,8 +2040,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
+	bool contended = qdisc_is_running(q);
 	int rc;
 
+	/*
+	 * Heuristic to force contended enqueues to serialize on a
+	 * separate lock before trying to get qdisc main lock.
+	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
+	 * and dequeue packets faster.
+	 */
+	if (unlikely(contended))
+		spin_lock(&q->busylock);
+
 	spin_lock(root_lock);
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
 		kfree_skb(skb);
@@ -2056,19 +2066,30 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
 		__qdisc_update_bstats(q, skb->len);
-		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
 			__qdisc_run(q);
-		else
+		} else
 			qdisc_run_end(q);
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		skb_dst_force(skb);
 		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
+		if (qdisc_run_begin(q)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
+			__qdisc_run(q);
+		}
 	}
 	spin_unlock(root_lock);
-
+	if (unlikely(contended))
+		spin_unlock(&q->busylock);
 	return rc;
 }
 
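
For readers who want to see the heuristic outside the kernel tree, below is a minimal user-space sketch of the same two-lock pattern, using pthread mutexes in place of spinlocks and an atomic flag in place of the __QDISC_STATE_RUNNING bit. Every name in it (toy_qdisc, toy_enqueue, toy_dequeue_run, and so on) is invented for illustration; it is not kernel code and it omits the bypass and drop paths.

/* busylock_sketch.c - user-space model of the busylock heuristic above.
 * Hypothetical illustration only; names are invented for this sketch.
 * Build: gcc -O2 -pthread busylock_sketch.c -o busylock_sketch
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_qdisc {
	pthread_mutex_t root_lock;  /* protects the queue itself (qdisc root lock) */
	pthread_mutex_t busylock;   /* contended enqueuers serialize here first    */
	atomic_bool     running;    /* stands in for __QDISC_STATE_RUNNING         */
	long            qlen;       /* the "queue" is just a counter               */
};

static struct toy_qdisc q = {
	.root_lock = PTHREAD_MUTEX_INITIALIZER,
	.busylock  = PTHREAD_MUTEX_INITIALIZER,
	.running   = false,
	.qlen      = 0,
};

static void toy_enqueue(struct toy_qdisc *q)
{
	/* Heuristic from the patch: if a dequeue run is already in
	 * progress, queue up on busylock first, so at most one enqueuer
	 * competes with the running owner for root_lock. */
	bool contended = atomic_load(&q->running);

	if (contended)
		pthread_mutex_lock(&q->busylock);

	pthread_mutex_lock(&q->root_lock);
	q->qlen++;
	pthread_mutex_unlock(&q->root_lock);

	if (contended)
		pthread_mutex_unlock(&q->busylock);
}

static void toy_dequeue_run(struct toy_qdisc *q)
{
	/* The "owner" drains the queue, re-taking root_lock per packet;
	 * this is the lock the heuristic tries not to starve. */
	atomic_store(&q->running, true);
	for (;;) {
		pthread_mutex_lock(&q->root_lock);
		if (q->qlen == 0) {
			pthread_mutex_unlock(&q->root_lock);
			break;
		}
		q->qlen--;
		pthread_mutex_unlock(&q->root_lock);
	}
	atomic_store(&q->running, false);
}

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		toy_enqueue(&q);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, producer, NULL);

	toy_dequeue_run(&q);   /* drain concurrently with the producers */

	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	toy_dequeue_run(&q);   /* drain whatever arrived after the first run */
	printf("final qlen: %ld\n", q.qlen);
	return 0;
}

As in the patch, enqueuers that observe a dequeue run in progress first serialize on busylock, so at most one of them competes with the running owner for root_lock at any moment; the owner therefore re-acquires root_lock quickly and dequeues packets faster. The sketch keeps busylock held across the whole enqueue for brevity, whereas the patch also drops it before calling __qdisc_run() once the caller becomes the running owner.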