@@ -53,7 +53,6 @@
 
 struct netem_sched_data {
	struct Qdisc	*qdisc;
-	struct sk_buff_head delayed;
	struct timer_list timer;
 
	u32 latency;
@@ -137,72 +136,6 @@ static long tabledist(unsigned long mu, long sigma,
	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-/* Put skb in the private delayed queue. */
-static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	psched_tdiff_t td;
-	psched_time_t now;
-
-	PSCHED_GET_TIME(now);
-	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-
-	/* Always queue at tail to keep packets in order */
-	if (likely(q->delayed.qlen < q->limit)) {
-		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
-
-		PSCHED_TADD2(now, td, cb->time_to_send);
-
-		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
-			 now, cb->time_to_send);
-
-		__skb_queue_tail(&q->delayed, skb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	pr_debug("netem_delay: queue over limit %d\n", q->limit);
-	sch->qstats.overlimits++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
-/*
- * Move a packet that is ready to send from the delay holding
- * list to the underlying qdisc.
- */
-static int netem_run(struct Qdisc *sch)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct sk_buff *skb;
-	psched_time_t now;
-
-	PSCHED_GET_TIME(now);
-
-	skb = skb_peek(&q->delayed);
-	if (skb) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			return 1;
-		}
-
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-
-	return 0;
-}
-
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
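With the private holding queue gone, a packet's departure time is computed exactly once, at enqueue time, via tabledist(). Below is a minimal userspace sketch (not kernel code) of the correlated random source that tabledist() draws from: each sample is blended with the previous one according to a correlation coefficient rho, which lets bursts of similar delays be modeled. The names crandom_state, next_crandom, and delay_sample are invented for illustration, and random() (31 bits) stands in for the kernel's net_random().

#include <stdint.h>
#include <stdlib.h>

struct crandom_state {
	uint32_t last;	/* previous sample */
	uint32_t rho;	/* correlation strength, 0 = uncorrelated */
};

/* Blend a fresh random value with the previous sample. */
static uint32_t next_crandom(struct crandom_state *s)
{
	uint64_t rho = s->rho;
	uint32_t fresh = (uint32_t)random();

	if (rho == 0)
		return fresh;

	s->last = (uint32_t)((rho * s->last +
			      ((1ULL << 32) - rho) * fresh) >> 32);
	return s->last;
}

/* Uniform mu +/- sigma delay, the no-table case of tabledist(). */
static long delay_sample(unsigned long mu, long sigma,
			 struct crandom_state *s)
{
	if (sigma == 0)
		return mu;
	return (next_crandom(s) % (2 * sigma)) - sigma + mu;
}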
@@ -212,6 +145,7 @@ static int netem_run(struct Qdisc *sch)
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
	struct netem_sched_data *q = qdisc_priv(sch);
+	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;
@@ -246,18 +180,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
		q->duplicate = dupsave;
	}
 
-	/* If doing simple delay then gap == 0 so all packets
-	 * go into the delayed holding queue
-	 * otherwise if doing out of order only "1 out of gap"
-	 * packets will be delayed.
+	/*
+	 * Do re-ordering by putting one out of N packets at the front
+	 * of the queue.
+	 * gap == 0 is special case for no-reordering.
	 */
-	if (q->counter < q->gap) {
+	if (q->gap == 0 || q->counter != q->gap) {
+		psched_time_t now;
+		PSCHED_GET_TIME(now);
+		PSCHED_TADD2(now,
+			     tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist),
+			     cb->time_to_send);
+
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		q->counter = 0;
-		ret = netem_delay(sch, skb);
-		netem_run(sch);
+		PSCHED_GET_TIME(cb->time_to_send);
+		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}
 
	if (likely(ret == NET_XMIT_SUCCESS)) {
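The rewritten branch above is what produces the reordering: most packets are stamped with now + tabledist(...) and tail-enqueued, while every gap-th packet is instead stamped with the current time and pushed to the head of the inner qdisc via ->requeue(), so it overtakes the delayed traffic. A minimal sketch of just that counter logic, with invented names (reorder_state, send_ahead), assuming gap == 0 disables reordering as the new comment says:

struct reorder_state {
	unsigned int counter;	/* packets since the last reorder */
	unsigned int gap;	/* reorder one out of every 'gap'; 0 = off */
};

/* Returns 1 when this packet should jump the queue. */
static int send_ahead(struct reorder_state *st)
{
	if (st->gap == 0 || st->counter != st->gap) {
		st->counter++;
		return 0;	/* normal path: delay stamp, tail enqueue */
	}
	st->counter = 0;
	return 1;		/* stamp with "now", requeue at head */
}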
@@ -301,22 +241,33 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
-	int pending;
-
-	pending = netem_run(sch);
 
	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
-		pr_debug("netem_dequeue: return skb=%p\n", skb);
-		sch->q.qlen--;
-		sch->flags &= ~TCQ_F_THROTTLED;
-	}
-	else if (pending) {
-		pr_debug("netem_dequeue: throttling\n");
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		psched_time_t now;
+		long delay;
+
+		/* if more time remaining? */
+		PSCHED_GET_TIME(now);
+		delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+		if (delay <= 0) {
+			pr_debug("netem_dequeue: return skb=%p\n", skb);
+			sch->q.qlen--;
+			sch->flags &= ~TCQ_F_THROTTLED;
+			return skb;
+		}
+
+		mod_timer(&q->timer, jiffies + delay);
		sch->flags |= TCQ_F_THROTTLED;
-	}
 
-	return skb;
+		if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
+			sch->qstats.drops++;
+	}
+
+	return NULL;
 }
 
 static void netem_watchdog(unsigned long arg)
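netem_dequeue() now does the waiting itself: it pulls the head packet, computes how long until its time_to_send stamp, and either releases it or requeues it at the head and arms the watchdog timer. The fragment below is an illustrative userspace analogue of that "due yet?" check; clock_gettime() stands in for PSCHED_GET_TIME and the helper name usec_until is invented. The kernel converts the same difference to jiffies with PSCHED_US2JIFFIE() before calling mod_timer().

#include <time.h>

/* Microseconds until 'due'; <= 0 means the packet may be sent now. */
static long usec_until(const struct timespec *due)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (due->tv_sec - now.tv_sec) * 1000000L +
	       (due->tv_nsec - now.tv_nsec) / 1000L;
}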
@@ -333,8 +284,6 @@ static void netem_reset(struct Qdisc *sch)
	struct netem_sched_data *q = qdisc_priv(sch);
 
	qdisc_reset(q->qdisc);
-	skb_queue_purge(&q->delayed);
-
	sch->q.qlen = 0;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer_sync(&q->timer);
@@ -460,7 +409,6 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
	if (!opt)
		return -EINVAL;
 
-	skb_queue_head_init(&q->delayed);
	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;