@@ -45,7 +45,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	if (unlikely(skb->next))
-		q->gso_skb = skb;
+		__skb_queue_head(&q->requeue, skb);
 	else
 		q->ops->requeue(skb, q);
 
@@ -57,9 +57,8 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
-	if ((skb = q->gso_skb))
-		q->gso_skb = NULL;
-	else
+	skb = __skb_dequeue(&q->requeue);
+	if (!skb)
 		skb = q->dequeue(q);
 
 	return skb;
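The two hunks above replace a single cached pointer (q->gso_skb, which could hold at most one packet) with a proper list: dev_requeue_skb() pushes an unsendable skb onto the head of q->requeue, and dequeue_skb() drains that list before calling the qdisc's own dequeue, so a requeued packet is seen again before any fresh packet is pulled from the qdisc. Below is a minimal user-space sketch of that discipline; struct pkt, struct pkt_list, and the helper names are illustrative stand-ins for the kernel's sk_buff / sk_buff_head API, not reproductions of it, and error handling is omitted.

/*
 * Sketch: a packet that could not be transmitted is pushed back onto
 * the head of a requeue list; the dequeue path always drains that
 * list before asking the "qdisc" for a fresh packet.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int id;
	struct pkt *next;
};

struct pkt_list {			/* stand-in for struct sk_buff_head */
	struct pkt *head;
};

static void list_push_head(struct pkt_list *l, struct pkt *p)
{
	p->next = l->head;		/* head insertion, like __skb_queue_head() */
	l->head = p;
}

static struct pkt *list_pop_head(struct pkt_list *l)
{
	struct pkt *p = l->head;	/* head removal, like __skb_dequeue() */
	if (p)
		l->head = p->next;
	return p;
}

static int next_id = 1;

static struct pkt *fake_qdisc_dequeue(void)	/* stand-in for q->dequeue(q) */
{
	struct pkt *p = malloc(sizeof(*p));
	p->id = next_id++;
	p->next = NULL;
	return p;
}

int main(void)
{
	struct pkt_list requeue = { NULL };

	/* Dequeue a packet, pretend the driver was busy, requeue it. */
	struct pkt *p = fake_qdisc_dequeue();
	printf("dequeued %d, driver busy, requeueing\n", p->id);
	list_push_head(&requeue, p);

	/* The next dequeue drains the requeue list first. */
	struct pkt *q = list_pop_head(&requeue);
	if (!q)
		q = fake_qdisc_dequeue();
	printf("dequeued %d again (requeued packet wins)\n", q->id);
	free(q);
	return 0;
}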
@@ -327,6 +326,7 @@ struct Qdisc noop_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
 };
@@ -352,6 +352,7 @@ static struct Qdisc noqueue_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
+	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue	=	&noqueue_netdev_queue,
 };
@@ -472,6 +473,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->padded = (char *) sch - (char *) p;
 
 	INIT_LIST_HEAD(&sch->list);
+	skb_queue_head_init(&sch->requeue);
 	skb_queue_head_init(&sch->q);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
@@ -539,7 +541,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	kfree_skb(qdisc->gso_skb);
+	__skb_queue_purge(&qdisc->requeue);
 
 	kfree((char *) qdisc - qdisc->padded);
 }
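With the requeue state now a list rather than a single pointer, teardown must free every packet still queued, which is why kfree_skb(qdisc->gso_skb) becomes __skb_queue_purge(&qdisc->requeue). Sketched below as a pop-and-free loop, reusing struct pkt_list and list_pop_head() from the sketch above; again an illustrative stand-in, not the kernel implementation.

/* Drain and free everything still on the list, as
 * __skb_queue_purge() does for a sk_buff_head. */
static void list_purge(struct pkt_list *l)
{
	struct pkt *p;

	while ((p = list_pop_head(l)) != NULL)
		free(p);
}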