
[NET_SCHED]: Remove CONFIG_NET_ESTIMATOR option

The generic estimator is always built in anyway; all the config option
does is prevent a minimal amount of setup code from being included.
Additionally, the option is already automatically selected in most cases.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Patrick McHardy · 18 years ago
Commit 876d48aabf
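
All of the removed #ifdef blocks guard the same small set of generic estimator calls, which after this patch run unconditionally. A rough sketch of that lifecycle, using the call signatures visible in the hunks below (the object "p", its fields, and the dump handle "d" are illustrative placeholders, not taken from any one file):

	/* Attach an estimator when userspace supplies a rate attribute. */
	if (est)
		gen_new_estimator(&p->bstats, &p->rate_est,
				  p->stats_lock, est);

	/* On a change request, replace the parameters in one step. */
	if (tca[TCA_RATE-1])
		gen_replace_estimator(&p->bstats, &p->rate_est,
				      p->stats_lock, tca[TCA_RATE-1]);

	/* Include the estimated rate when dumping statistics. */
	if (gnet_stats_copy_rate_est(&d, &p->rate_est) < 0)
		goto errout;

	/* Detach the estimator when the object is destroyed. */
	gen_kill_estimator(&p->bstats, &p->rate_est);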

+ 0 - 12
net/sched/Kconfig

@@ -286,7 +286,6 @@ config CLS_U32_MARK
 config NET_CLS_RSVP
 	tristate "IPv4 Resource Reservation Protocol (RSVP)"
 	select NET_CLS
-	select NET_ESTIMATOR
 	---help---
 	  The Resource Reservation Protocol (RSVP) permits end systems to
 	  request a minimum and maximum data flow rate for a connection; this
@@ -301,7 +300,6 @@ config NET_CLS_RSVP
 config NET_CLS_RSVP6
 	tristate "IPv6 Resource Reservation Protocol (RSVP6)"
 	select NET_CLS
-	select NET_ESTIMATOR
 	---help---
 	  The Resource Reservation Protocol (RSVP) permits end systems to
 	  request a minimum and maximum data flow rate for a connection; this
@@ -393,7 +391,6 @@ config NET_EMATCH_TEXT
 
 config NET_CLS_ACT
 	bool "Actions"
-	select NET_ESTIMATOR
 	---help---
 	  Say Y here if you want to use traffic control actions. Actions
 	  get attached to classifiers and are invoked after a successful
@@ -476,7 +473,6 @@ config NET_ACT_SIMP
 config NET_CLS_POLICE
 	bool "Traffic Policing (obsolete)"
 	depends on NET_CLS_ACT!=y
-	select NET_ESTIMATOR
 	---help---
 	  Say Y here if you want to do traffic policing, i.e. strict
 	  bandwidth limiting. This option is obsoleted by the traffic
@@ -491,14 +487,6 @@ config NET_CLS_IND
 	  classification based on the incoming device. This option is
 	  likely to disappear in favour of the metadata ematch.
 
-config NET_ESTIMATOR
-	bool "Rate estimator"
-	---help---
-	  Say Y here to allow using rate estimators to estimate the current
-	  rate-of-flow for network devices, queues, etc. This module is
-	  automatically selected if needed but can be selected manually for
-	  statistical purposes.
-
 endif # NET_SCHED
 
 endmenu

+ 0 - 6
net/sched/act_api.c

@@ -42,10 +42,8 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 			write_lock_bh(hinfo->lock);
 			*p1p = p->tcfc_next;
 			write_unlock_bh(hinfo->lock);
-#ifdef CONFIG_NET_ESTIMATOR
 			gen_kill_estimator(&p->tcfc_bstats,
 					   &p->tcfc_rate_est);
-#endif
 			kfree(p);
 			return;
 		}
@@ -236,11 +234,9 @@ struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_acti
 	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
 	p->tcfc_tm.install = jiffies;
 	p->tcfc_tm.lastuse = jiffies;
-#ifdef CONFIG_NET_ESTIMATOR
 	if (est)
 		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
 				  p->tcfc_stats_lock, est);
-#endif
 	a->priv = (void *) p;
 	return p;
 }
@@ -614,9 +610,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
 			goto errout;
 
 	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
 		goto errout;
 

+ 0 - 18
net/sched/act_police.c

@@ -118,10 +118,8 @@ void tcf_police_destroy(struct tcf_police *p)
 			write_lock_bh(&police_lock);
 			*p1p = p->tcf_next;
 			write_unlock_bh(&police_lock);
-#ifdef CONFIG_NET_ESTIMATOR
 			gen_kill_estimator(&p->tcf_bstats,
 					   &p->tcf_rate_est);
-#endif
 			if (p->tcfp_R_tab)
 				qdisc_put_rtab(p->tcfp_R_tab);
 			if (p->tcfp_P_tab)
@@ -227,7 +225,6 @@ override:
 		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
 	police->tcf_action = parm->action;
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tb[TCA_POLICE_AVRATE-1])
 		police->tcfp_ewma_rate =
 			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
@@ -235,7 +232,6 @@ override:
 		gen_replace_estimator(&police->tcf_bstats,
 				      &police->tcf_rate_est,
 				      police->tcf_stats_lock, est);
-#endif
 
 	spin_unlock_bh(&police->tcf_lock);
 	if (ret != ACT_P_CREATED)
@@ -281,14 +277,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 	police->tcf_bstats.bytes += skb->len;
 	police->tcf_bstats.packets++;
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
 		police->tcf_qstats.overlimits++;
 		spin_unlock(&police->tcf_lock);
 		return police->tcf_action;
 	}
-#endif
 
 	if (skb->len <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
@@ -348,10 +342,8 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	if (police->tcfp_result)
 		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
 			&police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate)
 		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
 	return skb->len;
 
 rtattr_failure:
@@ -477,14 +469,12 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 			goto failure;
 		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
 	}
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tb[TCA_POLICE_AVRATE-1]) {
 		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
 			goto failure;
 		police->tcfp_ewma_rate =
 			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
 	}
-#endif
 	police->tcfp_toks = police->tcfp_burst = parm->burst;
 	police->tcfp_mtu = parm->mtu;
 	if (police->tcfp_mtu == 0) {
@@ -498,11 +488,9 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 	police->tcf_index = parm->index ? parm->index :
 		tcf_police_new_index();
 	police->tcf_action = parm->action;
-#ifdef CONFIG_NET_ESTIMATOR
 	if (est)
 		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
 				  police->tcf_stats_lock, est);
-#endif
 	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
 	write_lock_bh(&police_lock);
 	police->tcf_next = tcf_police_ht[h];
@@ -528,14 +516,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *police)
 	police->tcf_bstats.bytes += skb->len;
 	police->tcf_bstats.packets++;
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
 		police->tcf_qstats.overlimits++;
 		spin_unlock(&police->tcf_lock);
 		return police->tcf_action;
 	}
-#endif
 	if (skb->len <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
 			spin_unlock(&police->tcf_lock);
@@ -591,10 +577,8 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
 	if (police->tcfp_result)
 		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
 			&police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
 	if (police->tcfp_ewma_rate)
 		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
 	return skb->len;
 
 rtattr_failure:
@@ -612,9 +596,7 @@ int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
 		goto errout;
 
 	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
 		goto errout;
 

+ 0 - 6
net/sched/sch_api.c

@@ -515,7 +515,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	sch->handle = handle;
 
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
-#ifdef CONFIG_NET_ESTIMATOR
 		if (tca[TCA_RATE-1]) {
 			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
 						sch->stats_lock,
@@ -531,7 +530,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 				goto err_out3;
 			}
 		}
-#endif
 		qdisc_lock_tree(dev);
 		list_add_tail(&sch->list, &dev->qdisc_list);
 		qdisc_unlock_tree(dev);
@@ -559,11 +557,9 @@ static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
 		if (err)
 			return err;
 	}
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
 			sch->stats_lock, tca[TCA_RATE-1]);
-#endif
 	return 0;
 }
 
@@ -839,9 +835,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 		goto rtattr_failure;
 
 	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
 		goto rtattr_failure;
 

+ 0 - 8
net/sched/sch_cbq.c

@@ -1653,9 +1653,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 		cl->xstats.undertime = cl->undertime - q->now;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
 		return -1;
 
@@ -1726,9 +1724,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 	tcf_destroy_chain(cl->filter_list);
 	qdisc_destroy(cl->q);
 	qdisc_put_rtab(cl->R_tab);
-#ifdef CONFIG_NET_ESTIMATOR
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
 	if (cl != &q->link)
 		kfree(cl);
 }
@@ -1873,11 +1869,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 
 		sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 		if (tca[TCA_RATE-1])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
 				cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 		return 0;
 	}
 
@@ -1963,11 +1957,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
 	sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
 			cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 
 	*arg = (unsigned long)cl;
 	return 0;

+ 0 - 2
net/sched/sch_generic.c

@@ -514,9 +514,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 		return;
 
 	list_del(&qdisc->list);
-#ifdef CONFIG_NET_ESTIMATOR
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-#endif
 	if (ops->reset)
 		ops->reset(qdisc);
 	if (ops->destroy)

+ 0 - 8
net/sched/sch_hfsc.c

@@ -1054,11 +1054,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		}
 		sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 		if (tca[TCA_RATE-1])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
 				cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 		return 0;
 	}
 
@@ -1112,11 +1110,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->cl_pcvtoff = parent->cl_cvtoff;
 	sch_tree_unlock(sch);
 
-#ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
 			cl->stats_lock, tca[TCA_RATE-1]);
-#endif
 	*arg = (unsigned long)cl;
 	return 0;
 }
@@ -1128,9 +1124,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
 
 	tcf_destroy_chain(cl->filter_list);
 	qdisc_destroy(cl->qdisc);
-#ifdef CONFIG_NET_ESTIMATOR
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
 	if (cl != &q->root)
 		kfree(cl);
 }
@@ -1384,9 +1378,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	xstats.rtwork  = cl->cl_cumul;
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
 		return -1;