/*
 * Fair Queue CoDel discipline (net/sched/sch_fq_codel.c)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified on flows (by the internal hash classifier,
 * or by an external one). This is a stochastic model: since a hash
 * is used, several flows may collide into the same slot.
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two Round Robin lists, so that new flows
 * have priority over old ones.
 *
 * For a given flow, packets are never reordered (CoDel uses a FIFO):
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

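/* Illustrative setup from user space (a sketch, not part of this file;
 * the parameter values shown are simply this qdisc's defaults):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *           target 5ms interval 100ms ecn
 */
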
struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto *filter_list;  /* optional external classifier */
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32              *backlogs;     /* backlog table [flows_cnt] */
        u32              flows_cnt;     /* number of flows */
        u32              perturbation;  /* hash perturbation */
        u32              quantum;       /* psched_mtu(qdisc_dev(sch)); */
        struct codel_params cparams;
        struct codel_stats  cstats;
        u32              drop_overlimit;
        u32              new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

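/* Hash the dissected flow key and map it onto [0, flows_cnt) with a
 * multiply/shift instead of a modulus, so flows_cnt does not need to
 * be a power of two.
 */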
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  const struct sk_buff *skb)
{
        struct flow_keys keys;
        unsigned int hash;

        skb_flow_dissect(skb, &keys);
        hash = jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src ^ keys.ip_proto,
                            (__force u32)keys.ports, q->perturbation);
        return ((u64)hash * q->flows_cnt) >> 32;
}

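/* Returns the flow index + 1 (0 means drop): skb->priority may select
 * a bucket directly, an attached filter may classify, and otherwise we
 * fall back to the stochastic hash.
 */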
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        if (!q->filter_list)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
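                        /* fall through: STOLEN/QUEUED also return 0 */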
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* Helper functions: these might change when/if sk_buff ever uses a
 * standard list_head.
 */

/* remove one skb from the head of a slot's queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* add skb to a flow's queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

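/* Called when the qdisc is over its packet limit: penalize the fattest
 * flow, so heavy hitters absorb the overload rather than sparse flows.
 */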
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;

        /* Queue is full! Find the fat flow and drop one packet from it.
         * This might sound expensive, but with 1024 flows we scan only
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (enqueue/dequeue) with many cache misses.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }
        flow = &q->flows[idx];
        skb = dequeue_head(flow);
        len = qdisc_pkt_len(skb);
        q->backlogs[idx] -= len;
        kfree_skb(skb);
        sch->q.qlen--;
        sch->qstats.drops++;
        sch->qstats.backlog -= len;
        flow->dropped++;
        return idx;
}

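/* Enqueue: classify into a bucket, timestamp the skb for CoDel and
 * append at the tail. A flow found empty is (re)attached to new_flows
 * with a fresh quantum of credit.
 */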
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        sch->qstats.backlog += qdisc_pkt_len(skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                codel_vars_init(&flow->cvars);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;

        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
        if (fq_codel_drop(sch) == idx)
                return NET_XMIT_CN;

        /* As we dropped a packet, better let the upper stack know */
        qdisc_tree_decrease_qlen(sch, 1);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue() to dequeue
 * a packet from a flow's queue. Note: backlog is handled in codel, so
 * we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
}

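/* DRR dequeue: serve new_flows before old_flows; a flow that exhausted
 * its deficit is refilled and rotated to the tail of old_flows.
 */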
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;

        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
                            dequeue);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
         * or HTB crashes. Defer it for the next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
                q->cstats.drop_count = 0;
        }
        return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct sk_buff *skb;

        while ((skb = fq_codel_dequeue(sch)) != NULL)
                kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
};

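/* TCA_FQ_CODEL_TARGET and TCA_FQ_CODEL_INTERVAL arrive from user space
 * in microseconds and are converted below to codel_time_t units.
 */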
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                kfree_skb(skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
        q->cstats.drop_count = 0;

        sch_tree_unlock(sch);
        return 0;
}

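/* The flow table can be too large for kmalloc() to find contiguous
 * pages, so fall back to vzalloc(); fq_codel_free() handles both cases.
 */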
static void *fq_codel_zalloc(size_t sz)
{
        void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
                ptr = vzalloc(sz);
        return ptr;
}

static void fq_codel_free(void *addr)
{
        if (addr) {
                if (is_vmalloc_addr(addr))
                        vfree(addr);
                else
                        kfree(addr);
        }
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        fq_codel_free(q->backlogs);
        fq_codel_free(q->flows);
}

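/* Defaults: 10240 packet limit, 1024 buckets, quantum = one device MTU
 * (psched_mtu), ECN enabled; the remaining CoDel parameters come from
 * codel_params_init(). The flow table is allocated once, on first init.
 */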
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->perturbation = net_random();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;

        if (opt) {
                int err = fq_codel_change(sch, opt);
                if (err)
                        return err;
        }

        if (!q->flows) {
                q->flows = fq_codel_zalloc(q->flows_cnt *
                                           sizeof(struct fq_codel_flow));
                if (!q->flows)
                        return -ENOMEM;
                q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
                if (!q->backlogs) {
                        fq_codel_free(q->flows);
                        return -ENOMEM;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        nla_nest_end(skb, opts);
        return skb->len;

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;

        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

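/* fq_codel is not really classful, but each hash bucket is exposed as a
 * pseudo-class (1..flows_cnt) so that per-flow statistics can be dumped.
 */
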
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        /* we cannot bypass the queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb = flow->head;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                while (skb) {
                        qs.qlen++;
                        skb = skb->next;
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, &qs) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           = fq_codel_leaf,
        .get            = fq_codel_get,
        .put            = fq_codel_put,
        .tcf_chain      = fq_codel_find_tcf,
        .bind_tcf       = fq_codel_bind,
        .unbind_tcf     = fq_codel_put,
        .dump           = fq_codel_dump_class,
        .dump_stats     = fq_codel_dump_class_stats,
        .walk           = fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         = &fq_codel_class_ops,
        .id             = "fq_codel",
        .priv_size      = sizeof(struct fq_codel_sched_data),
        .enqueue        = fq_codel_enqueue,
        .dequeue        = fq_codel_dequeue,
        .peek           = qdisc_peek_dequeued,
        .drop           = fq_codel_drop,
        .init           = fq_codel_init,
        .reset          = fq_codel_reset,
        .destroy        = fq_codel_destroy,
        .change         = fq_codel_change,
        .dump           = fq_codel_dump,
        .dump_stats     = fq_codel_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");