/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129:	- Bug fix with grio mode
 *		- a better single AvgQ mode with Grio (WRED)
 *		- A finer grained VQ dequeue based on suggestion
 *		  from Ren Liu
 *		- More error checks
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif

#define GRED_DEF_PRIO (MAX_DPs / 2)
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
	/* Parameters */
	u32		limit;		/* HARD maximal queue length	*/
	u32		qth_min;	/* Min average length threshold: A scaled */
	u32		qth_max;	/* Max average length threshold: A scaled */
	u32		DP;		/* the drop parameters */
	char		Wlog;		/* log(W)		*/
	char		Plog;		/* random number bits	*/
	u32		Scell_max;
	u32		Rmask;
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u32		forced;		/* packets dropped for exceeding limits */
	u32		early;		/* packets dropped as a warning */
	u32		other;		/* packets dropped by invoking drop() */
	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
	char		Scell_log;
	u8		Stab[256];
	u8		prio;		/* the prio of this vq */

	/* Variables */
	unsigned long	qave;		/* Average queue length: A scaled */
	int		qcount;		/* Packets since last random number generation */
	u32		qR;		/* Cached random number */

	psched_time_t	qidlestart;	/* Start of idle period	*/
};
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};
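
/*
 * Operating-mode flags kept in gred_sched.flags: RIO mode is enabled when
 * the table is configured with per-VQ priorities ("grio"); on top of that,
 * WRED mode is enabled by gred_wred_mode_check() whenever two or more VQs
 * share the same priority, in which case every VQ tracks the average queue
 * length of the default VQ (see gred_enqueue()).
 */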
struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		DPs;
	u32		def;
	u8		initd;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
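
/*
 * Enqueue path: the VQ is selected from the low nibble of skb->tc_index.
 * The RED average (qave) is aged via Stab[] after an idle period, updated
 * from the VQ backlog (or the qdisc-wide backlog in WRED mode), and then
 * compared against qth_min/qth_max to decide between queueing, early
 * (probabilistic) drop and forced drop.
 */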
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	psched_time_t now;
	struct gred_sched_data *q=NULL;
	struct gred_sched *t= qdisc_priv(sch);
	unsigned long qave=0;
	int i=0;

	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	if ( ((skb->tc_index&0xf) > (t->DPs -1)) || !(q=t->tab[skb->tc_index&0xf])) {
		printk("GRED: setting to default (%d)\n ",t->def);
		if (!(q=t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping!! "
				"(%d)\n ", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
		 sch->qstats.backlog);
	/* sum up all the qaves of prios <= to ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i=0;i<t->DPs;i++) {
			if ((!t->tab[i]) || (i==q->DP))
				continue;

			if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
				qave +=t->tab[i]->qave;
		}
	}

	q->packetsin++;
	q->bytesin+=skb->len;

	if (gred_wred_mode(t)) {
		qave=0;
		q->qave=t->tab[t->def]->qave;
		q->qidlestart=t->tab[t->def]->qidlestart;
	}

	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
		long us_idle;
		PSCHED_GET_TIME(now);
		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
		PSCHED_SET_PASTPERFECT(q->qidlestart);

		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
	} else {
		if (gred_wred_mode(t)) {
			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
		} else {
			q->qave += q->backlog - (q->qave >> q->Wlog);
		}
	}

	if (gred_wred_mode(t))
		t->tab[t->def]->qave=q->qave;

	if ((q->qave+qave) < q->qth_min) {
		q->qcount = -1;
enqueue:
		if (q->backlog + skb->len <= q->limit) {
			q->backlog += skb->len;
do_enqueue:
			__skb_queue_tail(&sch->q, skb);
			sch->qstats.backlog += skb->len;
			sch->bstats.bytes += skb->len;
			sch->bstats.packets++;
			return 0;
		} else {
			q->pdrop++;
		}

drop:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}
	if ((q->qave+qave) >= q->qth_max) {
		q->qcount = -1;
		sch->qstats.overlimits++;
		q->forced++;
		goto drop;
	}
	if (++q->qcount) {
		if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
			goto enqueue;
		q->qcount = 0;
		q->qR = net_random()&q->Rmask;
		sch->qstats.overlimits++;
		q->early++;
		goto drop;
	}
	q->qR = net_random()&q->Rmask;
	goto enqueue;
}
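
/*
 * gred_requeue() puts a packet that could not be transmitted back at the
 * head of the queue and clears the idle-period marker of its VQ, since the
 * queue is busy again.
 */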
static int
gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);
	q= t->tab[(skb->tc_index&0xf)];
	/* error checking here -- probably unnecessary */
	PSCHED_SET_PASTPERFECT(q->qidlestart);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	q->backlog += skb->len;
	return 0;
}
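
/*
 * Dequeue from the shared queue: the owning VQ's backlog is reduced and,
 * when that VQ (or, in WRED mode, the whole qdisc) drains completely,
 * qidlestart is stamped so the next enqueue can age the average down.
 */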
static struct sk_buff *
gred_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		q= t->tab[(skb->tc_index&0xf)];
		if (q) {
			q->backlog -= skb->len;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
		}
		return skb;
	}

	if (gred_wred_mode(t)) {
		q= t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
				 "screwed up\n");
		else
			PSCHED_GET_TIME(q->qidlestart);
	}

	return NULL;
}
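
/*
 * ->drop() is invoked when a parent qdisc needs to reclaim space: the packet
 * at the tail is discarded and counted against the owning VQ's "other" drop
 * counter.
 */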
static unsigned int gred_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;
		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q= t->tab[(skb->tc_index&0xf)];
		if (q) {
			q->backlog -= len;
			q->other++;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_drop: skb has bad tcindex %x\n",skb->tc_index&0xf);
		}

		kfree_skb(skb);
		return len;
	}

	q=t->tab[t->def];
	if (!q) {
		D2PRINTK("no default VQ set: Results might be screwed up\n");
		return 0;
	}

	PSCHED_GET_TIME(q->qidlestart);
	return 0;
}
static void gred_reset(struct Qdisc* sch)
{
	int i;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	__skb_queue_purge(&sch->q);

	sch->qstats.backlog = 0;

	for (i=0;i<t->DPs;i++) {
		q= t->tab[i];
		if (!q)
			continue;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		q->qave = 0;
		q->qcount = -1;
		q->backlog = 0;
		q->other=0;
		q->forced=0;
		q->pdrop=0;
		q->early=0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
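
/*
 * Table-level (re)configuration: sets the number of DPs and the default VQ,
 * switches between RIO and plain GRED operation, and destroys any VQ that is
 * now beyond the configured DP range ("shadowed").
 */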
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	table->initd = 0;

	return 0;
}
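
/*
 * Per-VQ (re)configuration: allocates the VQ on first use and loads the RED
 * parameters from userspace. qth_min/qth_max arrive unscaled and are shifted
 * left by Wlog here, matching the scaled average kept in qave.
 */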
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->Wlog = ctl->Wlog;
	q->Plog = ctl->Plog;
	q->limit = ctl->limit;
	q->Scell_log = ctl->Scell_log;
	q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
	q->Scell_max = (255<<q->Scell_log);
	q->qth_min = ctl->qth_min<<ctl->Wlog;
	q->qth_max = ctl->qth_max<<ctl->Wlog;
	q->qave=0;
	q->backlog=0;
	q->qcount = -1;
	q->other=0;
	q->forced=0;
	q->pdrop=0;
	q->early=0;

	PSCHED_SET_PASTPERFECT(q->qidlestart);
	memcpy(q->Stab, stab, 256);

	return 0;
}
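
/*
 * gred_change() handles two cases: if neither TCA_GRED_PARMS nor
 * TCA_GRED_STAB is present, the request is treated as a table definition
 * change; otherwise the attributes configure a single VQ, and the default
 * VQ is created from the same parameters if it does not exist yet.
 */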
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (table->tab[table->def] == NULL) {
		if (gred_rio_mode(table))
			prio = table->tab[ctl->DP]->prio;

		err = gred_change_vq(sch, table->def, ctl, prio, stab);
		if (err < 0)
			goto errout_locked;
	}

	table->initd = 1;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->qth_min >> q->Wlog;
		opt.qth_max	= q->qth_max >> q->Wlog;
		opt.Wlog	= q->Wlog;
		opt.Plog	= q->Plog;
		opt.Scell_log	= q->Scell_log;
		opt.other	= q->other;
		opt.early	= q->early;
		opt.forced	= q->forced;
		opt.pdrop	= q->pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (q->qave) {
			if (gred_wred_mode(table)) {
				q->qidlestart=table->tab[table->def]->qidlestart;
				q->qave=table->tab[table->def]->qave;
			}
			if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
				long idle;
				unsigned long qave;
				psched_time_t now;
				PSCHED_GET_TIME(now);
				idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
				qave = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
				opt.qave = qave >> q->Wlog;
			} else {
				opt.qave = q->qave >> q->Wlog;
			}
		}

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
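
/*
 * Usage sketch (parameter names recalled from the iproute2 GRED examples;
 * they may differ between tc versions, so treat this as illustrative): the
 * table is defined first, then each DP gets its own RED parameters, e.g.
 *
 *	tc qdisc add dev eth0 root gred setup DPs 3 default 2 grio
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 1 probability 0.02 prio 2
 *
 * Packets are steered to a DP via the low nibble of skb->tc_index, which is
 * typically set by the dsmark qdisc from the DS field.
 */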