@@ -37,15 +37,11 @@
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-static inline int qdisc_qlen(struct Qdisc *q)
-{
-	return q->q.qlen;
-}
-
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	q->gso_skb = skb;
 	q->qstats.requeues++;
+	q->q.qlen++;	/* it's still part of the queue */
 	__netif_schedule(q);
 
 	return 0;
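The qdisc_qlen() helper disappears from this file just as dev_requeue_skb() starts counting a requeued gso_skb in q->q.qlen. The point of the accounting change is that a single length check now covers both the qdisc's own queue and a pending requeue; a minimal sketch of that check (the helper is assumed to move to a shared header, which these hunks do not show):

/* Sketch only: with the accounting above, an empty qlen really means
 * the qdisc holds nothing, a requeued gso_skb included. */
static inline bool qdisc_is_idle_sketch(const struct Qdisc *q)
{
	return q->q.qlen == 0;
}
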
@@ -61,9 +57,11 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+		if (!netif_tx_queue_stopped(txq) &&
+		    !netif_tx_queue_frozen(txq)) {
 			q->gso_skb = NULL;
-		else
+			q->q.qlen--;
+		} else
 			skb = NULL;
 	} else {
 		skb = q->dequeue(q);
@@ -103,44 +101,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under qdisc_lock(q) with locally disabled BH.
- *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
- * this queue.
- *
- * netif_tx_lock serializes accesses to device driver.
- *
- * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
- * if one is grabbed, another must be free.
- *
- * Note, that this procedure can be called by a watchdog timer
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
  *
  * Returns to the caller:
  *				0  - queue is empty or throttled.
  *				>0 - queue is not empty.
- *
  */
-static inline int qdisc_restart(struct Qdisc *q)
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		    struct net_device *dev, struct netdev_queue *txq,
+		    spinlock_t *root_lock)
 {
-	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
-	struct net_device *dev;
-	spinlock_t *root_lock;
-	struct sk_buff *skb;
-
-	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(q)) == NULL))
-		return 0;
-
-	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	dev = qdisc_dev(q);
-	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
@@ -177,6 +154,44 @@ static inline int qdisc_restart(struct Qdisc *q)
 	return ret;
 }
 
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note, that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ *				0  - queue is empty or throttled.
+ *				>0 - queue is not empty.
+ *
+ */
+static inline int qdisc_restart(struct Qdisc *q)
+{
+	struct netdev_queue *txq;
+	struct net_device *dev;
+	spinlock_t *root_lock;
+	struct sk_buff *skb;
+
+	/* Dequeue packet */
+	skb = dequeue_skb(q);
+	if (unlikely(!skb))
+		return 0;
+
+	root_lock = qdisc_lock(q);
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
+
 void __qdisc_run(struct Qdisc *q)
 {
 	unsigned long start_time = jiffies;
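The hunks above show only the lines that change, so the full lock choreography of the new sch_direct_xmit() is easy to lose: it is entered with the qdisc root lock held, drops it for the duration of the driver call, and re-takes it before deciding what to do with the driver's return code. A condensed outline, reconstructed for illustration rather than quoted from the tree:

/* Outline only, not the verbatim function body. */
int sch_direct_xmit_outline(struct sk_buff *skb, struct Qdisc *q,
			    struct net_device *dev, struct netdev_queue *txq,
			    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	spin_unlock(root_lock);			/* release qdisc lock */

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);			/* re-take qdisc lock */

	switch (ret) {
	case NETDEV_TX_OK:
		ret = q->q.qlen;		/* sent: report what is left */
		break;
	case NETDEV_TX_LOCKED:
		ret = handle_dev_cpu_collision(skb, txq, q);
		break;
	default:
		ret = dev_requeue_skb(skb, q);	/* device stopped the queue */
		break;
	}

	return ret;
}

The net effect is the contract stated in the new comment: the caller holds root_lock across the call, and a positive return value means the qdisc still has work for __qdisc_run() to do.
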
@@ -547,8 +562,11 @@ void qdisc_reset(struct Qdisc *qdisc)
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	kfree_skb(qdisc->gso_skb);
-	qdisc->gso_skb = NULL;
+	if (qdisc->gso_skb) {
+		kfree_skb(qdisc->gso_skb);
+		qdisc->gso_skb = NULL;
+		qdisc->q.qlen = 0;
+	}
 }
 EXPORT_SYMBOL(qdisc_reset);
 
@@ -605,6 +623,9 @@ static void attach_one_default_qdisc(struct net_device *dev,
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
 		}
+
+		/* Can by-pass the queue discipline for default qdisc */
+		qdisc->flags |= TCQ_F_CAN_BYPASS;
 	} else {
 		qdisc = &noqueue_qdisc;
 	}
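TCQ_F_CAN_BYPASS is only set here; the code that acts on it sits on the transmit path in net/core/dev.c, which these hunks do not include. A sketch of that consumer, with illustrative names: only when the default (work-conserving) qdisc is empty and not already running may the skb go straight to sch_direct_xmit(), skipping the enqueue/dequeue round trip.

/* Sketch with illustrative names; the real caller lives in net/core/dev.c. */
static int xmit_bypass_sketch(struct sk_buff *skb, struct Qdisc *q,
			      struct net_device *dev, struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if ((q->flags & TCQ_F_CAN_BYPASS) && !q->q.qlen &&
	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/* empty qdisc, nobody else transmitting: send directly */
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);		/* more work appeared meanwhile */
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);
		rc = NET_XMIT_SUCCESS;
	} else {
		/* normal path: enqueue and let the qdisc dequeue it */
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);

	return rc;
}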