/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129: - Bug fix with grio mode
 *         - A better single-AvgQ mode with grio (WRED)
 *         - A finer-grained VQ dequeue based on a suggestion
 *           from Ren Liu
 *         - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */

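/*
 * Rough usage sketch with iproute2 (option syntax may differ between
 * iproute2 versions, see tc-gred(8); the values below are only examples):
 *
 *      # create the table: 4 virtual queues (DPs), VQ 0 as default, grio on
 *      tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *      # configure one DP with RED-style parameters
 *      tc qdisc change dev eth0 root gred DP 0 prio 2 limit 60KB \
 *              min 15KB max 45KB burst 20 avpkt 1000 \
 *              bandwidth 10Mbit probability 0.02
 *
 * Packets are steered to a virtual queue by the low bits of
 * skb->tc_index, see tc_index_to_dp() below.
 */
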
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
        u32             DP;             /* the drop parameters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
        u32             packetsin;      /* packets seen on virtualQ so far*/
        u32             backlog;        /* bytes on the virtualQ */
        u8              prio;           /* the prio of this vq */

        struct red_parms parms;
        struct red_vars  vars;
        struct red_stats stats;
};

enum {
        GRED_WRED_MODE = 1,
        GRED_RIO_MODE,
};

struct gred_sched {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             red_flags;
        u32             DPs;
        u32             def;
        struct red_vars wred_set;
};

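/*
 * Orientation note (summarizing the logic below): the qdisc runs in one
 * of three modes.
 *
 *  - plain GRED: every virtual queue (VQ) runs RED on its own backlog
 *    and its own average queue length.
 *  - RIO mode (enabled via sopt->grio): VQs carry priorities, and a VQ's
 *    RED decision also counts the averages of all VQs with numerically
 *    lower (i.e. more important) prio, see gred_enqueue().
 *  - WRED mode: switched on automatically when two VQs share the same
 *    prio; all VQs then share one average (wred_set) computed over the
 *    backlog of the whole qdisc.
 */
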
static inline int gred_wred_mode(struct gred_sched *table)
{
        return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
        __set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
        __clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
        return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
        __set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
        __clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        /* Really ugly O(n^2), but it shouldn't need to run too often. */
        for (i = 0; i < table->DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                int n;

                if (q == NULL)
                        continue;

                for (n = i + 1; n < table->DPs; n++)
                        if (table->tab[n] && table->tab[n]->prio == q->prio)
                                return 1;
        }

        return 0;
}

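/*
 * Note: a return value of 1 ("at least two VQs share a prio") is what the
 * callers use as the trigger to put the qdisc into WRED mode; see
 * gred_change_table_def() and gred_change().
 */
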
static inline unsigned int gred_backlog(struct gred_sched *table,
                                        struct gred_sched_data *q,
                                        struct Qdisc *sch)
{
        if (gred_wred_mode(table))
                return sch->qstats.backlog;
        else
                return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
        return skb->tc_index & GRED_VQ_MASK;
}

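/*
 * The low log2(MAX_DPs) bits of skb->tc_index select the VQ. tc_index is
 * normally set further up the stack, e.g. by the dsmark qdisc or the
 * tcindex classifier.
 */
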
static inline void gred_load_wred_set(const struct gred_sched *table,
                                      struct gred_sched_data *q)
{
        q->vars.qavg = table->wred_set.qavg;
        q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
                                       struct gred_sched_data *q)
{
        table->wred_set.qavg = q->vars.qavg;
}

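/*
 * In WRED mode all VQs share one EWMA: gred_load_wred_set() copies the
 * shared qavg/qidlestart into the VQ before red_calc_qavg() runs, and
 * gred_store_wred_set() publishes the updated average back. Only qavg is
 * written back; the shared qidlestart is maintained at table level in
 * gred_dequeue() and gred_drop().
 */
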
static inline int gred_use_ecn(struct gred_sched *t)
{
        return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
        return t->red_flags & TC_RED_HARDDROP;
}

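/*
 * red_flags come from userspace (tc_gred_sopt.flags). With TC_RED_ECN a
 * probabilistic hit CE-marks ECN-capable packets instead of dropping
 * them; TC_RED_HARDDROP forces a drop beyond the hard-mark threshold
 * even when the packet could have been marked. See gred_enqueue().
 */
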
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct gred_sched_data *q = NULL;
        struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);

        if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;
                q = t->tab[dp];

                if (!q) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
                        if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;
                }

                /* fix tc_index? --could be controversial but needed for
                 * requeueing
                 */
                skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
        }

        /* sum up all the qaves of prios < ours to get the new qave */
        if (!gred_wred_mode(t) && gred_rio_mode(t)) {
                int i;

                for (i = 0; i < t->DPs; i++) {
                        if (t->tab[i] && t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->vars))
                                qavg += t->tab[i]->vars.qavg;
                }
        }

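        /*
         * RIO coupling, illustrated: with VQs at prio 1, 2 and 3 and this
         * packet landing in the prio-3 VQ, the RED decision below runs on
         * qavg(vq1) + qavg(vq2) + qavg(vq3), so lower-priority traffic
         * sees the queue as "fuller" and gets dropped/marked earlier.
         */
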
        q->packetsin++;
        q->bytesin += qdisc_pkt_len(skb);

        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);

        q->vars.qavg = red_calc_qavg(&q->parms,
                                     &q->vars,
                                     gred_backlog(t, q, sch));

        if (red_is_idling(&q->vars))
                red_end_of_idle_period(&q->vars);

        if (gred_wred_mode(t))
                gred_store_wred_set(t, q);

        switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
        case RED_DONT_MARK:
                break;

        case RED_PROB_MARK:
                sch->qstats.overlimits++;
                if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                }

                q->stats.prob_mark++;
                break;

        case RED_HARD_MARK:
                sch->qstats.overlimits++;
                if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
                        goto congestion_drop;
                }

                q->stats.forced_mark++;
                break;
        }

        if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
                q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }

        q->stats.pdrop++;
drop:
        return qdisc_drop(skb, sch);

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;
}

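/*
 * The decision above is classic RED (see include/net/red.h): the average
 * queue length is an EWMA, roughly
 *
 *      qavg <- qavg + W * (backlog - qavg),  where W = 2^-Wlog,
 *
 * kept in fixed point. red_action() returns RED_DONT_MARK below qth_min,
 * RED_PROB_MARK with a probability growing toward max_P between qth_min
 * and qth_max, and RED_HARD_MARK above qth_max.
 */
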
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_head(sch);

        if (skb) {
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
                                             tc_index_to_dp(skb));
                } else {
                        q->backlog -= qdisc_pkt_len(skb);

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->vars);
                }

                return skb;
        }

        if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
                red_start_of_idle_period(&t->wred_set);

        return NULL;
}

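/*
 * Starting an idle period when a VQ (or, in WRED mode, the whole qdisc)
 * drains lets red_calc_qavg() decay the average over the idle time
 * instead of freezing it, so a queue that sat empty for a while is not
 * still judged by a stale, inflated average.
 */
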
static unsigned int gred_drop(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_tail(sch);
        if (skb) {
                unsigned int len = qdisc_pkt_len(skb);
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
                                             tc_index_to_dp(skb));
                } else {
                        q->backlog -= len;
                        q->stats.other++;

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->vars);
                }

                qdisc_drop(skb, sch);
                return len;
        }

        if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
                red_start_of_idle_period(&t->wred_set);

        return 0;
}

static void gred_reset(struct Qdisc *sch)
{
        int i;
        struct gred_sched *t = qdisc_priv(sch);

        qdisc_reset_queue(sch);

        for (i = 0; i < t->DPs; i++) {
                struct gred_sched_data *q = t->tab[i];

                if (!q)
                        continue;

                red_restart(&q->vars);
                q->backlog = 0;
        }
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
        kfree(q);
}

static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_sopt *sopt;
        int i;

        if (dps == NULL)
                return -EINVAL;

        sopt = nla_data(dps);

        if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
                return -EINVAL;

        sch_tree_lock(sch);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;
        table->red_flags = sopt->flags;

        /*
         * Every entry point to GRED is synchronized with the above code
         * and the DP is checked against DPs, i.e. shadowed VQs can no
         * longer be found so we can unlock right here.
         */
        sch_tree_unlock(sch);

        if (sopt->grio) {
                gred_enable_rio_mode(table);
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        } else {
                gred_disable_rio_mode(table);
                gred_disable_wred_mode(table);
        }

        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
                        pr_warning("GRED: Warning: Destroying shadowed VQ 0x%x\n",
                                   i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
        }

        return 0;
}

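/*
 * Note on shrinking the table: VQs above the new DPs value become
 * unreachable ("shadowed") the moment table->DPs is lowered under the
 * tree lock, which is why they can safely be destroyed outside the lock
 * afterwards, with the warning above.
 */
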
static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio,
                                 u8 *stab, u32 max_P,
                                 struct gred_sched_data **prealloc)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];

        if (!q) {
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
                if (!q)
                        return -ENOMEM;
        }

        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->vars);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
                      ctl->Scell_log, stab, max_P);
        red_set_vars(&q->vars);
        return 0;
}

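/*
 * The *prealloc dance exists because this function runs under
 * sch_tree_lock(): the caller allocates a spare gred_sched_data with
 * GFP_KERNEL before taking the lock, and it is consumed here only if
 * the VQ does not exist yet. An unused prealloc is freed by the caller.
 */
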
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_PARMS]        = { .len = sizeof(struct tc_gred_qopt) },
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
        [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
};

static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt *ctl;
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err, prio = GRED_DEF_PRIO;
        u8 *stab;
        u32 max_P;
        struct gred_sched_data *prealloc;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
        if (err < 0)
                return err;

        /* A message carrying neither PARMS nor STAB (re)defines the table.
         * Pass the DPS attribute itself, not the whole TCA_OPTIONS nest;
         * gred_change_table_def() expects a tc_gred_sopt payload.
         */
        if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
                return gred_change_table_def(sch, tb[TCA_GRED_DPS]);

        if (tb[TCA_GRED_PARMS] == NULL ||
            tb[TCA_GRED_STAB] == NULL)
                return -EINVAL;

        max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

        err = -EINVAL;
        ctl = nla_data(tb[TCA_GRED_PARMS]);
        stab = nla_data(tb[TCA_GRED_STAB]);

        if (ctl->DP >= table->DPs)
                goto errout;

        if (gred_rio_mode(table)) {
                if (ctl->prio == 0) {
                        int def_prio = GRED_DEF_PRIO;

                        if (table->tab[table->def])
                                def_prio = table->tab[table->def]->prio;

                        printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
                               ctl->DP, def_prio);

                        prio = def_prio;
                } else
                        prio = ctl->prio;
        }

        prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
        sch_tree_lock(sch);

        err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
        if (err < 0)
                goto errout_locked;

        if (gred_rio_mode(table)) {
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        }

        err = 0;

errout_locked:
        sch_tree_unlock(sch);
        kfree(prealloc);
errout:
        return err;
}

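/*
 * Flow of gred_change(), in short: a message with neither PARMS nor STAB
 * is a table (re)definition; otherwise both are required, the VQ is
 * (re)configured under the tree lock, and WRED mode is re-evaluated in
 * case the prio update created or removed a duplicate priority.
 */
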
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
        if (err < 0)
                return err;

        if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
                return -EINVAL;

        return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct nlattr *parms, *opts = NULL;
        int i;
        u32 max_p[MAX_DPs];
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
                .grio   = gred_rio_mode(table),
                .flags  = table->red_flags,
        };

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
                goto nla_put_failure;

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];

                max_p[i] = q ? q->parms.max_P : 0;
        }
        if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
                goto nla_put_failure;

        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                struct tc_gred_qopt opt;

                memset(&opt, 0, sizeof(opt));

                if (!q) {
                        /* hack -- fix at some point with proper message.
                         * This is how we indicate to tc that there is no
                         * VQ at this DP.
                         */
                        opt.DP = MAX_DPs + i;
                        goto append_opt;
                }

                opt.limit       = q->limit;
                opt.DP          = q->DP;
                opt.backlog     = q->backlog;
                opt.prio        = q->prio;
                opt.qth_min     = q->parms.qth_min >> q->parms.Wlog;
                opt.qth_max     = q->parms.qth_max >> q->parms.Wlog;
                opt.Wlog        = q->parms.Wlog;
                opt.Plog        = q->parms.Plog;
                opt.Scell_log   = q->parms.Scell_log;
                opt.other       = q->stats.other;
                opt.early       = q->stats.prob_drop;
                opt.forced      = q->stats.forced_drop;
                opt.pdrop       = q->stats.pdrop;
                opt.packets     = q->packetsin;
                opt.bytesin     = q->bytesin;

                if (gred_wred_mode(table))
                        gred_load_wred_set(table, q);

                opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);

append_opt:
                if (nla_append(skb, sizeof(opt), &opt) < 0)
                        goto nla_put_failure;
        }

        nla_nest_end(skb, parms);

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        for (i = 0; i < table->DPs; i++) {
                if (table->tab[i])
                        gred_destroy_vq(table->tab[i]);
        }
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
        .id             = "gred",
        .priv_size      = sizeof(struct gred_sched),
        .enqueue        = gred_enqueue,
        .dequeue        = gred_dequeue,
        .peek           = qdisc_peek_head,
        .drop           = gred_drop,
        .init           = gred_init,
        .reset          = gred_reset,
        .destroy        = gred_destroy,
        .change         = gred_change,
        .dump           = gred_dump,
        .owner          = THIS_MODULE,
};

static int __init gred_module_init(void)
{
        return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
        unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");