@@ -82,7 +82,7 @@ struct cbq_class
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
 	unsigned char		ovl_strategy;
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	unsigned char		police;
 #endif
 
@@ -154,7 +154,7 @@ struct cbq_sched_data
 	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
 								   with backlog */
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	struct cbq_class	*rx_class;
 #endif
 	struct cbq_class	*tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 	return NULL;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -247,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		/*
 		 * Step 2+n. Apply classifier.
 		 */
-		if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0)
+		if (!head->filter_list ||
+		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
 			goto fallback;
 
 		if ((cl = (void*)res.class) == NULL) {
@@ -267,6 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			*qerr = NET_XMIT_SUCCESS;
 		case TC_ACT_SHOT:
 			return NULL;
+		case TC_ACT_RECLASSIFY:
+			return cbq_reclassify(skb, cl);
 		}
 #elif defined(CONFIG_NET_CLS_POLICE)
 		switch (result) {
@@ -389,7 +392,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
@@ -399,7 +402,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	cl->q->__parent = sch;
 #endif
 	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -434,7 +437,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cbq_mark_toplevel(q, cl);
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = cl;
 	cl->q->__parent = sch;
 #endif
@@ -670,7 +673,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 }
 
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
@@ -1364,7 +1367,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 	return 0;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
 {
 	cl->police = p->police;
@@ -1532,7 +1535,7 @@ rtattr_failure:
 	return -1;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
@@ -1558,7 +1561,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 	    cbq_dump_rate(skb, cl) < 0 ||
 	    cbq_dump_wrr(skb, cl) < 0 ||
 	    cbq_dump_ovl(skb, cl) < 0 ||
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	    cbq_dump_police(skb, cl) < 0 ||
 #endif
 	    cbq_dump_fopt(skb, cl) < 0)
@@ -1653,7 +1656,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 					     cl->classid)) == NULL)
 			return -ENOBUFS;
 	} else {
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (cl->police == TC_POLICE_RECLASSIFY)
 			new->reshape_fail = cbq_reshape_fail;
 #endif
@@ -1718,7 +1721,7 @@ cbq_destroy(struct Qdisc* sch)
 	struct cbq_class *cl;
 	unsigned h;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = NULL;
 #endif
 	/*
@@ -1747,7 +1750,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 	struct cbq_class *cl = (struct cbq_class*)arg;
 
 	if (--cl->refcnt == 0) {
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
 		spin_lock_bh(&sch->dev->queue_lock);
@@ -1795,7 +1798,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		    RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
 			return -EINVAL;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (tb[TCA_CBQ_POLICE-1] &&
 		    RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
 			return -EINVAL;
@@ -1838,7 +1841,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		if (tb[TCA_CBQ_OVL_STRATEGY-1])
 			cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (tb[TCA_CBQ_POLICE-1])
 			cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1931,7 +1934,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY-1])
 		cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (tb[TCA_CBQ_POLICE-1])
 		cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1975,7 +1978,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 		q->tx_class = NULL;
 		q->tx_borrowed = NULL;
 	}
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (q->rx_class == cl)
 		q->rx_class = NULL;
 #endif