/* sch_gred.c */
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129:	- Bug fix with grio mode
 *		- a better single AvgQ mode with Grio (WRED)
 *		- A finer-grained VQ dequeue based on suggestion
 *		  from Ren Liu
 *		- More error checks
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
  22. #include <linux/config.h>
  23. #include <linux/module.h>
  24. #include <asm/uaccess.h>
  25. #include <asm/system.h>
  26. #include <linux/bitops.h>
  27. #include <linux/types.h>
  28. #include <linux/kernel.h>
  29. #include <linux/sched.h>
  30. #include <linux/string.h>
  31. #include <linux/mm.h>
  32. #include <linux/socket.h>
  33. #include <linux/sockios.h>
  34. #include <linux/in.h>
  35. #include <linux/errno.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/if_ether.h>
  38. #include <linux/inet.h>
  39. #include <linux/netdevice.h>
  40. #include <linux/etherdevice.h>
  41. #include <linux/notifier.h>
  42. #include <net/ip.h>
  43. #include <net/route.h>
  44. #include <linux/skbuff.h>
  45. #include <net/sock.h>
  46. #include <net/pkt_sched.h>
/* Debug macros: DPRINTK is for control-path messages (compiled in, the
 * "#if 1" arm), D2PRINTK is for per-packet data-path messages (compiled
 * out, the "#if 0" arm).  Both expand to nothing when disabled. */
#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif
#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
struct gred_sched_data;
struct gred_sched;

/* Per-virtual-queue (VQ / DP) state.  One of these exists for every
 * active drop parameter set; packets are mapped to a VQ through the
 * low 4 bits of skb->tc_index. */
struct gred_sched_data
{
/* Parameters */
	u32		limit;		/* HARD maximal queue length */
	u32		qth_min;	/* Min average length threshold: A scaled */
	u32		qth_max;	/* Max average length threshold: A scaled */
	u32		DP;		/* the drop pramaters */
	char		Wlog;		/* log(W) */
	char		Plog;		/* random number bits */
	u32		Scell_max;
	u32		Rmask;		/* mask applied to net_random() (Plog bits) */
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u32		forced;		/* packets dropped for exceeding limits */
	u32		early;		/* packets dropped as a warning */
	u32		other;		/* packets dropped by invoking drop() */
	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
	char		Scell_log;
	u8		Stab[256];	/* idle-time decay lookup table */
	u8		prio;		/* the prio of this vq */

/* Variables */
	unsigned long	qave;		/* Average queue length: A scaled */
	int		qcount;		/* Packets since last random number generation */
	u32		qR;		/* Cached random number */

	psched_time_t	qidlestart;	/* Start of idle period */
};

enum {
	GRED_WRED_MODE = 1,	/* bit index in gred_sched.flags */
};

/* Top-level GRED scheduler state: the table of virtual queues plus
 * global configuration (number of DPs, default DP, grio/WRED flags). */
struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		DPs;	/* number of configured virtual queues */
	u32		def;	/* default DP used for unmapped packets */
	u8		initd;	/* set once parameters have been applied */
	u8		grio;	/* priority-based (RIO-like) mode enabled */
};
/* Test whether the qdisc is operating in WRED mode (all VQs share one
 * average-queue estimate). */
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

/* Switch WRED mode on.  Non-atomic (__set_bit): callers run under the
 * qdisc lock. */
static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

/* Switch WRED mode off.  Same locking assumption as above. */
static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}
  110. static inline int gred_wred_mode_check(struct Qdisc *sch)
  111. {
  112. struct gred_sched *table = qdisc_priv(sch);
  113. int i;
  114. /* Really ugly O(n^2) but shouldn't be necessary too frequent. */
  115. for (i = 0; i < table->DPs; i++) {
  116. struct gred_sched_data *q = table->tab[i];
  117. int n;
  118. if (q == NULL)
  119. continue;
  120. for (n = 0; n < table->DPs; n++)
  121. if (table->tab[n] && table->tab[n] != q &&
  122. table->tab[n]->prio == q->prio)
  123. return 1;
  124. }
  125. return 0;
  126. }
/*
 * Enqueue one packet.  Maps the skb to a virtual queue via the low 4
 * bits of tc_index (falling back to the default DP), updates that VQ's
 * EWMA average queue length, and then applies the RED decision:
 * below qth_min -> enqueue, above qth_max -> force-drop, in between ->
 * probabilistic early drop.  Returns 0 on enqueue, NET_XMIT_DROP on drop.
 *
 * Note the label placement: "enqueue"/"do_enqueue"/"drop" sit inside the
 * first if-arm and are jumped to from the later branches.
 */
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	psched_time_t now;
	struct gred_sched_data *q=NULL;
	struct gred_sched *t= qdisc_priv(sch);
	unsigned long	qave=0;
	int i=0;

	/* Not configured yet: accept packets up to the device tx ring
	 * size rather than dropping them. */
	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	/* Map tc_index to a VQ; invalid or missing VQs fall back to the
	 * default DP (and the skb is dropped if even that is absent). */
	if ( ((skb->tc_index&0xf) > (t->DPs -1)) || !(q=t->tab[skb->tc_index&0xf])) {
		printk("GRED: setting to default (%d)\n ",t->def);
		if (!(q=t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping!! "
			    "(%d)\n ", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controvesial but needed for
		   requeueing */
		skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
	    "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
	    sch->qstats.backlog);
	/* sum up all the qaves of prios <= to ours to get the new qave*/
	if (!gred_wred_mode(t) && t->grio) {
		for (i=0;i<t->DPs;i++) {
			if ((!t->tab[i]) || (i==q->DP))
				continue;
			/* only idle VQs of higher priority contribute */
			if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
				qave +=t->tab[i]->qave;
		}
	}

	q->packetsin++;
	q->bytesin+=skb->len;

	/* WRED mode: all VQs share the default DP's average. */
	if (gred_wred_mode(t)) {
		qave=0;
		q->qave=t->tab[t->def]->qave;
		q->qidlestart=t->tab[t->def]->qidlestart;
	}

	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
		/* Coming out of an idle period: decay the average using
		 * the Stab lookup table indexed by scaled idle time. */
		long us_idle;
		PSCHED_GET_TIME(now);
		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
		PSCHED_SET_PASTPERFECT(q->qidlestart);

		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
	} else {
		/* Standard EWMA update: avg += (backlog - avg) * W. */
		if (gred_wred_mode(t)) {
			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
		} else {
			q->qave += q->backlog - (q->qave >> q->Wlog);
		}
	}

	/* Propagate the shared average back in WRED mode. */
	if (gred_wred_mode(t))
		t->tab[t->def]->qave=q->qave;

	if ((q->qave+qave) < q->qth_min) {
		q->qcount = -1;
enqueue:
		/* Per-VQ hard limit check before tail-queueing. */
		if (q->backlog + skb->len <= q->limit) {
			q->backlog += skb->len;
do_enqueue:
			__skb_queue_tail(&sch->q, skb);
			sch->qstats.backlog += skb->len;
			sch->bstats.bytes += skb->len;
			sch->bstats.packets++;
			return 0;
		} else {
			q->pdrop++;
		}

drop:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}
	/* Average beyond qth_max: unconditional (forced) drop. */
	if ((q->qave+qave) >= q->qth_max) {
		q->qcount = -1;
		sch->qstats.overlimits++;
		q->forced++;
		goto drop;
	}
	/* Between the thresholds: drop with probability growing in the
	 * packet count since the last random draw (classic RED). */
	if (++q->qcount) {
		if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
			goto enqueue;
		q->qcount = 0;
		q->qR = net_random()&q->Rmask;
		sch->qstats.overlimits++;
		q->early++;
		goto drop;
	}
	q->qR = net_random()&q->Rmask;
	goto enqueue;
}
  221. static int
  222. gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
  223. {
  224. struct gred_sched_data *q;
  225. struct gred_sched *t= qdisc_priv(sch);
  226. q= t->tab[(skb->tc_index&0xf)];
  227. /* error checking here -- probably unnecessary */
  228. PSCHED_SET_PASTPERFECT(q->qidlestart);
  229. __skb_queue_head(&sch->q, skb);
  230. sch->qstats.backlog += skb->len;
  231. sch->qstats.requeues++;
  232. q->backlog += skb->len;
  233. return 0;
  234. }
/*
 * Dequeue the next packet from the shared queue.  Adjusts the owning
 * VQ's backlog and, when that VQ drains completely (non-WRED mode),
 * timestamps the start of its idle period so the average can be decayed
 * on the next enqueue.  Returns the skb, or NULL when the queue is
 * empty (in which case WRED mode marks the default VQ idle instead).
 */
static struct sk_buff *
gred_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t= qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		q= t->tab[(skb->tc_index&0xf)];
		if (q) {
			q->backlog -= skb->len;
			/* VQ empty: start its idle clock (per-VQ averages
			 * only exist outside WRED mode). */
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
		}
		return skb;
	}

	/* Whole qdisc empty: in WRED mode the shared average lives in the
	 * default VQ, so mark that one idle. */
	if (gred_wred_mode(t)) {
		q= t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
			       "screwed up\n");
		else
			PSCHED_GET_TIME(q->qidlestart);
	}

	return NULL;
}
  264. static unsigned int gred_drop(struct Qdisc* sch)
  265. {
  266. struct sk_buff *skb;
  267. struct gred_sched_data *q;
  268. struct gred_sched *t= qdisc_priv(sch);
  269. skb = __skb_dequeue_tail(&sch->q);
  270. if (skb) {
  271. unsigned int len = skb->len;
  272. sch->qstats.backlog -= len;
  273. sch->qstats.drops++;
  274. q= t->tab[(skb->tc_index&0xf)];
  275. if (q) {
  276. q->backlog -= len;
  277. q->other++;
  278. if (!q->backlog && !gred_wred_mode(t))
  279. PSCHED_GET_TIME(q->qidlestart);
  280. } else {
  281. D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
  282. }
  283. kfree_skb(skb);
  284. return len;
  285. }
  286. q=t->tab[t->def];
  287. if (!q) {
  288. D2PRINTK("no default VQ set: Results might be screwed up\n");
  289. return 0;
  290. }
  291. PSCHED_GET_TIME(q->qidlestart);
  292. return 0;
  293. }
  294. static void gred_reset(struct Qdisc* sch)
  295. {
  296. int i;
  297. struct gred_sched_data *q;
  298. struct gred_sched *t= qdisc_priv(sch);
  299. __skb_queue_purge(&sch->q);
  300. sch->qstats.backlog = 0;
  301. for (i=0;i<t->DPs;i++) {
  302. q= t->tab[i];
  303. if (!q)
  304. continue;
  305. PSCHED_SET_PASTPERFECT(q->qidlestart);
  306. q->qave = 0;
  307. q->qcount = -1;
  308. q->backlog = 0;
  309. q->other=0;
  310. q->forced=0;
  311. q->pdrop=0;
  312. q->early=0;
  313. }
  314. }
/*
 * Apply a configuration change from userspace (tc).  Two request shapes
 * are accepted:
 *
 *  1. A table-level request (no PARMS/STAB attributes): sets the number
 *     of DPs, the default DP and grio mode from a TCA_GRED_DPS sopt.
 *  2. A per-VQ request: TCA_GRED_PARMS (tc_gred_qopt) plus a 256-byte
 *     TCA_GRED_STAB decay table, allocating the VQ on first use.
 *
 * Returns 0 on success, -EINVAL on malformed input, -ENOMEM on
 * allocation failure.
 */
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;
	struct tc_gred_qopt *ctl;
	struct tc_gred_sopt *sopt;
	struct rtattr *tb[TCA_GRED_STAB];
	struct rtattr *tb2[TCA_GRED_DPS];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
		return -EINVAL;

	/* Shape 1: table-level setup via TCA_GRED_DPS. */
	if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
		rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);

		if (tb2[TCA_GRED_DPS-1] == 0)
			return -EINVAL;

		sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
		table->DPs=sopt->DPs;
		table->def=sopt->def_DP;

		/* grio on: WRED mode is derived, not requested — it is
		 * enabled iff two VQs share a priority. */
		if (sopt->grio) {
			table->grio = 1;
			gred_disable_wred_mode(table);
			if (gred_wred_mode_check(sch))
				gred_enable_wred_mode(table);
		} else {
			table->grio = 0;
			gred_disable_wred_mode(table);
		}

		table->initd=0;
		/* probably need to clear all the table DP entries as well */
		return 0;
	}

	/* Shape 2: per-VQ parameters.  Both PARMS and the full 256-byte
	 * STAB must be present and the table must already be sized. */
	if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
		RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
		RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	if (ctl->DP > MAX_DPs-1 ) {
		/* misbehaving is punished! Put in the default drop probability */
		DPRINTK("\nGRED: DP %u not in the proper range fixed. New DP "
			"set to default at %d\n",ctl->DP,table->def);
		ctl->DP=table->def;
	}

	/* Lazily allocate the VQ descriptor on first configuration. */
	if (table->tab[ctl->DP] == NULL) {
		table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
					    GFP_KERNEL);
		if (NULL == table->tab[ctl->DP])
			return -ENOMEM;
		memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
	}
	q= table->tab[ctl->DP];

	/* Priority selection: in grio mode an unset (<=0) prio inherits
	 * the default VQ's prio, else falls back to 8; outside grio mode
	 * every VQ gets prio 8. */
	if (table->grio) {
		if (ctl->prio <=0) {
			if (table->def && table->tab[table->def]) {
				DPRINTK("\nGRED: DP %u does not have a prio"
					"setting default to %d\n",ctl->DP,
					table->tab[table->def]->prio);
				q->prio=table->tab[table->def]->prio;
			} else {
				DPRINTK("\nGRED: DP %u does not have a prio"
					" setting default to 8\n",ctl->DP);
				q->prio=8;
			}
		} else {
			q->prio=ctl->prio;
		}
	} else {
		q->prio=8;
	}

	/* Copy RED parameters; thresholds are stored pre-scaled by Wlog
	 * to match the scaled average (qave). */
	q->DP=ctl->DP;
	q->Wlog = ctl->Wlog;
	q->Plog = ctl->Plog;
	q->limit = ctl->limit;
	q->Scell_log = ctl->Scell_log;
	q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
	q->Scell_max = (255<<q->Scell_log);
	q->qth_min = ctl->qth_min<<ctl->Wlog;
	q->qth_max = ctl->qth_max<<ctl->Wlog;
	q->qave=0;
	q->backlog=0;
	q->qcount = -1;
	q->other=0;
	q->forced=0;
	q->pdrop=0;
	q->early=0;

	PSCHED_SET_PASTPERFECT(q->qidlestart);
	memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);

	/* Re-derive WRED mode: this change may have created or removed a
	 * duplicate priority. */
	if (table->grio) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	if (!table->initd) {
		table->initd=1;
		/*
		the first entry also goes into the default until
		over-written
		*/

		if (table->tab[table->def] == NULL) {
			table->tab[table->def]=
				kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
			if (NULL == table->tab[table->def])
				return -ENOMEM;
			memset(table->tab[table->def], 0,
			       (sizeof(struct gred_sched_data)));
		}
		q= table->tab[table->def];
		/* Mirror the parameters of the VQ just configured into
		 * the default VQ (same scaling as above). */
		q->DP=table->def;
		q->Wlog = ctl->Wlog;
		q->Plog = ctl->Plog;
		q->limit = ctl->limit;
		q->Scell_log = ctl->Scell_log;
		q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
		q->Scell_max = (255<<q->Scell_log);
		q->qth_min = ctl->qth_min<<ctl->Wlog;
		q->qth_max = ctl->qth_max<<ctl->Wlog;

		if (table->grio)
			q->prio=table->tab[ctl->DP]->prio;
		else
			q->prio=8;

		q->qcount = -1;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
	}
	return 0;
}
  439. static int gred_init(struct Qdisc *sch, struct rtattr *opt)
  440. {
  441. struct gred_sched *table = qdisc_priv(sch);
  442. struct tc_gred_sopt *sopt;
  443. struct rtattr *tb[TCA_GRED_STAB];
  444. struct rtattr *tb2[TCA_GRED_DPS];
  445. if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_STAB, opt))
  446. return -EINVAL;
  447. if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0) {
  448. rtattr_parse_nested(tb2, TCA_GRED_DPS, opt);
  449. if (tb2[TCA_GRED_DPS-1] == 0)
  450. return -EINVAL;
  451. sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
  452. table->DPs=sopt->DPs;
  453. table->def=sopt->def_DP;
  454. table->grio=sopt->grio;
  455. table->initd=0;
  456. return 0;
  457. }
  458. DPRINTK("\n GRED_INIT error!\n");
  459. return -EINVAL;
  460. }
  461. static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
  462. {
  463. unsigned long qave;
  464. struct rtattr *rta;
  465. struct tc_gred_qopt *opt = NULL ;
  466. struct tc_gred_qopt *dst;
  467. struct gred_sched *table = qdisc_priv(sch);
  468. struct gred_sched_data *q;
  469. int i;
  470. unsigned char *b = skb->tail;
  471. rta = (struct rtattr*)b;
  472. RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
  473. opt=kmalloc(sizeof(struct tc_gred_qopt)*MAX_DPs, GFP_KERNEL);
  474. if (opt == NULL) {
  475. DPRINTK("gred_dump:failed to malloc for %Zd\n",
  476. sizeof(struct tc_gred_qopt)*MAX_DPs);
  477. goto rtattr_failure;
  478. }
  479. memset(opt, 0, (sizeof(struct tc_gred_qopt))*table->DPs);
  480. if (!table->initd) {
  481. DPRINTK("NO GRED Queues setup!\n");
  482. }
  483. for (i=0;i<MAX_DPs;i++) {
  484. dst= &opt[i];
  485. q= table->tab[i];
  486. if (!q) {
  487. /* hack -- fix at some point with proper message
  488. This is how we indicate to tc that there is no VQ
  489. at this DP */
  490. dst->DP=MAX_DPs+i;
  491. continue;
  492. }
  493. dst->limit=q->limit;
  494. dst->qth_min=q->qth_min>>q->Wlog;
  495. dst->qth_max=q->qth_max>>q->Wlog;
  496. dst->DP=q->DP;
  497. dst->backlog=q->backlog;
  498. if (q->qave) {
  499. if (gred_wred_mode(table)) {
  500. q->qidlestart=table->tab[table->def]->qidlestart;
  501. q->qave=table->tab[table->def]->qave;
  502. }
  503. if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
  504. long idle;
  505. psched_time_t now;
  506. PSCHED_GET_TIME(now);
  507. idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
  508. qave = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
  509. dst->qave = qave >> q->Wlog;
  510. } else {
  511. dst->qave = q->qave >> q->Wlog;
  512. }
  513. } else {
  514. dst->qave = 0;
  515. }
  516. dst->Wlog = q->Wlog;
  517. dst->Plog = q->Plog;
  518. dst->Scell_log = q->Scell_log;
  519. dst->other = q->other;
  520. dst->forced = q->forced;
  521. dst->early = q->early;
  522. dst->pdrop = q->pdrop;
  523. dst->prio = q->prio;
  524. dst->packets=q->packetsin;
  525. dst->bytesin=q->bytesin;
  526. }
  527. RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt)*MAX_DPs, opt);
  528. rta->rta_len = skb->tail - b;
  529. kfree(opt);
  530. return skb->len;
  531. rtattr_failure:
  532. if (opt)
  533. kfree(opt);
  534. DPRINTK("gred_dump: FAILURE!!!!\n");
  535. /* also free the opt struct here */
  536. skb_trim(skb, b - skb->data);
  537. return -1;
  538. }
  539. static void gred_destroy(struct Qdisc *sch)
  540. {
  541. struct gred_sched *table = qdisc_priv(sch);
  542. int i;
  543. for (i = 0;i < table->DPs; i++) {
  544. if (table->tab[i])
  545. kfree(table->tab[i]);
  546. }
  547. }
/* Operations table registered with the qdisc core; classless (.cl_ops
 * NULL), so GRED exposes no class interface. */
static struct Qdisc_ops gred_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
/* Register the "gred" qdisc with the packet scheduler on module load. */
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

/* Unregister on module unload. */
static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");