
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129:     - Bug fix with grio mode
 *	       - a better single AvgQ mode with Grio (WRED)
 *	       - A finer grained VQ dequeue based on a suggestion
 *	         from Ren Liu
 *	       - More error checks
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
	/* Parameters */
	u32		limit;		/* HARD maximal queue length	*/
	u32		qth_min;	/* Min average length threshold: A scaled */
	u32		qth_max;	/* Max average length threshold: A scaled */
	u32		DP;		/* the drop parameters */
	char		Wlog;		/* log(W)		*/
	char		Plog;		/* random number bits	*/
	u32		Scell_max;
	u32		Rmask;
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u32		forced;		/* packets dropped for exceeding limits */
	u32		early;		/* packets dropped as a warning */
	u32		other;		/* packets dropped by invoking drop() */
	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
	char		Scell_log;
	u8		Stab[256];
	u8		prio;		/* the prio of this vq */

	/* Variables */
	unsigned long	qave;		/* Average queue length: A scaled */
	int		qcount;		/* Packets since last random number generation */
	u32		qR;		/* Cached random number */

	psched_time_t	qidlestart;	/* Start of idle period	*/
};
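/*
 * GRED runs each virtual queue (VQ) as an independent RED instance.
 * In RIO mode a VQ also folds in the averages of non-idle VQs with
 * numerically lower prio values; in WRED mode all VQs share the single
 * average kept in the default VQ.  The flags below select the behaviour.
 */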
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		DPs;
	u32		def;
	u8		initd;
};
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
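/*
 * Returns 1 if at least two VQs are configured with the same priority,
 * which is the condition for running in WRED mode, 0 otherwise.
 */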
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
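/*
 * Packets are mapped to a VQ by the low nibble of skb->tc_index.  The
 * VQ's average queue length is updated and the classic RED decision is
 * applied: enqueue below qth_min, force-drop above qth_max, and drop
 * probabilistically in between.
 */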
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	psched_time_t now;
	struct gred_sched_data *q=NULL;
	struct gred_sched *t= qdisc_priv(sch);
	unsigned long	qave=0;
	int i=0;

	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues set up yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	if ( ((skb->tc_index&0xf) > (t->DPs -1)) || !(q=t->tab[skb->tc_index&0xf])) {
		printk("GRED: setting to default (%d)\n ",t->def);
		if (!(q=t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping!! "
				"(%d)\n ", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
		 sch->qstats.backlog);
	/* sum up all the qaves of prios <= to ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i=0;i<t->DPs;i++) {
			if ((!t->tab[i]) || (i==q->DP))
				continue;

			if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
				qave += t->tab[i]->qave;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t)) {
		qave=0;
		q->qave=t->tab[t->def]->qave;
		q->qidlestart=t->tab[t->def]->qidlestart;
	}
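	/*
	 * If the VQ has been idle, decay the average with the Stab lookup
	 * table (RED's idle-time correction); otherwise apply the usual
	 * EWMA step: qave += backlog - (qave >> Wlog).
	 */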
	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
		long us_idle;
		PSCHED_GET_TIME(now);
		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
		PSCHED_SET_PASTPERFECT(q->qidlestart);

		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
	} else {
		if (gred_wred_mode(t)) {
			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
		} else {
			q->qave += q->backlog - (q->qave >> q->Wlog);
		}
	}

	if (gred_wred_mode(t))
		t->tab[t->def]->qave=q->qave;
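	/*
	 * Standard RED decision on the combined average: the VQ's own
	 * qave plus the RIO-mode contribution accumulated in the local
	 * qave variable above.
	 */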
	if ((q->qave+qave) < q->qth_min) {
		q->qcount = -1;
enqueue:
		if (q->backlog + skb->len <= q->limit) {
			q->backlog += skb->len;
do_enqueue:
			__skb_queue_tail(&sch->q, skb);
			sch->qstats.backlog += skb->len;
			sch->bstats.bytes += skb->len;
			sch->bstats.packets++;
			return 0;
		} else {
			q->pdrop++;
		}

drop:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}
	if ((q->qave+qave) >= q->qth_max) {
		q->qcount = -1;
		sch->qstats.overlimits++;
		q->forced++;
		goto drop;
	}
	if (++q->qcount) {
		if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
			goto enqueue;
		q->qcount = 0;
		q->qR = net_random()&q->Rmask;
		sch->qstats.overlimits++;
		q->early++;
		goto drop;
	}
	q->qR = net_random()&q->Rmask;
	goto enqueue;
}
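/*
 * A requeued packet goes back to the head of the queue; the idle marker
 * is cleared since the VQ is clearly busy again.
 */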
static int
gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);
	q= t->tab[(skb->tc_index&0xf)];
	/* error checking here -- probably unnecessary */
	PSCHED_SET_PASTPERFECT(q->qidlestart);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	q->backlog += skb->len;
	return 0;
}
static struct sk_buff *
gred_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);

	if (skb) {
		sch->qstats.backlog -= skb->len;
		q= t->tab[(skb->tc_index&0xf)];
		if (q) {
			q->backlog -= skb->len;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
		}
		return skb;
	}

	if (gred_wred_mode(t)) {
		q= t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
				 "screwed up\n");
		else
			PSCHED_GET_TIME(q->qidlestart);
	}

	return NULL;
}
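/*
 * ->drop removes a packet from the tail (the cheapest victim), charges
 * the owning VQ's "other" counter and starts its idle period if the VQ
 * just went empty.
 */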
static unsigned int gred_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;
		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q= t->tab[(skb->tc_index&0xf)];
		if (q) {
			q->backlog -= len;
			q->other++;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_drop: skb has bad tcindex %x\n",skb->tc_index&0xf);
		}

		kfree_skb(skb);
		return len;
	}

	q=t->tab[t->def];
	if (!q) {
		D2PRINTK("no default VQ set: Results might be screwed up\n");
		return 0;
	}

	PSCHED_GET_TIME(q->qidlestart);
	return 0;
}
static void gred_reset(struct Qdisc* sch)
{
	int i;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	__skb_queue_purge(&sch->q);

	sch->qstats.backlog = 0;

	for (i=0;i<t->DPs;i++) {
		q= t->tab[i];
		if (!q)
			continue;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		q->qave = 0;
		q->qcount = -1;
		q->backlog = 0;
		q->other=0;
		q->forced=0;
		q->pdrop=0;
		q->early=0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
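/*
 * Handles the TCA_GRED_DPS attribute: (re)sizes the table of virtual
 * queues, picks the default DP and switches between RIO/WRED modes.
 * VQs beyond the new DP count are destroyed.
 */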
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	table->initd = 0;

	return 0;
}
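/*
 * Configures a single VQ from a tc_gred_qopt: the RED thresholds arrive
 * unscaled and are shifted by Wlog here, Rmask is derived from Plog for
 * the early-drop random test, and the 256-byte Stab table drives the
 * idle-time decay of the average.
 */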
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_STAB];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);

	if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	if (ctl->DP > MAX_DPs-1 ) {
		/* misbehaving is punished! Put in the default drop probability */
		DPRINTK("\nGRED: DP %u not in the proper range, fixed. New DP "
			"set to default at %d\n",ctl->DP,table->def);
		ctl->DP=table->def;
	}

	if (table->tab[ctl->DP] == NULL) {
		table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
					    GFP_KERNEL);
		if (NULL == table->tab[ctl->DP])
			return -ENOMEM;
		memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
	}
	q= table->tab[ctl->DP];

	if (gred_rio_mode(table)) {
		if (ctl->prio <=0) {
			if (table->def && table->tab[table->def]) {
				DPRINTK("\nGRED: DP %u does not have a prio, "
					"setting default to %d\n",ctl->DP,
					table->tab[table->def]->prio);
				q->prio=table->tab[table->def]->prio;
			} else {
				DPRINTK("\nGRED: DP %u does not have a prio, "
					"setting default to 8\n",ctl->DP);
				q->prio=8;
			}
		} else {
			q->prio=ctl->prio;
		}
	} else {
		q->prio=8;
	}

	q->DP=ctl->DP;
	q->Wlog = ctl->Wlog;
	q->Plog = ctl->Plog;
	q->limit = ctl->limit;
	q->Scell_log = ctl->Scell_log;
	q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
	q->Scell_max = (255<<q->Scell_log);
	q->qth_min = ctl->qth_min<<ctl->Wlog;
	q->qth_max = ctl->qth_max<<ctl->Wlog;
	q->qave=0;
	q->backlog=0;
	q->qcount = -1;
	q->other=0;
	q->forced=0;
	q->pdrop=0;
	q->early=0;

	PSCHED_SET_PASTPERFECT(q->qidlestart);
	memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	if (!table->initd) {
		table->initd=1;
		/*
		 * the first entry also goes into the default until
		 * overwritten
		 */

		if (table->tab[table->def] == NULL) {
			table->tab[table->def]=
				kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
			if (NULL == table->tab[table->def])
				return -ENOMEM;

			memset(table->tab[table->def], 0,
			       (sizeof(struct gred_sched_data)));
		}
		q= table->tab[table->def];
		q->DP=table->def;
		q->Wlog = ctl->Wlog;
		q->Plog = ctl->Plog;
		q->limit = ctl->limit;
		q->Scell_log = ctl->Scell_log;
		q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
		q->Scell_max = (255<<q->Scell_log);
		q->qth_min = ctl->qth_min<<ctl->Wlog;
		q->qth_max = ctl->qth_max<<ctl->Wlog;

		if (gred_rio_mode(table))
			q->prio=table->tab[ctl->DP]->prio;
		else
			q->prio=8;

		q->qcount = -1;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
	}
	return 0;
}
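/*
 * ->init accepts only the table-wide TCA_GRED_DPS attribute; per-VQ
 * parameters have to be supplied later through ->change.
 */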
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
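/*
 * Dumps one tc_gred_qopt per possible DP.  Empty slots are flagged by
 * reporting a DP value >= MAX_DPs so that tc can tell them apart from
 * configured VQs.
 */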
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->qth_min >> q->Wlog;
		opt.qth_max	= q->qth_max >> q->Wlog;
		opt.Wlog	= q->Wlog;
		opt.Plog	= q->Plog;
		opt.Scell_log	= q->Scell_log;
		opt.other	= q->other;
		opt.early	= q->early;
		opt.forced	= q->forced;
		opt.pdrop	= q->pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (q->qave) {
			if (gred_wred_mode(table)) {
				q->qidlestart=table->tab[table->def]->qidlestart;
				q->qave=table->tab[table->def]->qave;
			}
			if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
				long idle;
				unsigned long qave;
				psched_time_t now;
				PSCHED_GET_TIME(now);
				idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
				qave = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
				opt.qave = qave >> q->Wlog;
			} else {
				opt.qave = q->qave >> q->Wlog;
			}
		}

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.next		= NULL,
	.cl_ops		= NULL,
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.requeue	= gred_requeue,
	.drop		= gred_drop,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");