/* net/red.h - Random Early Detection (RED) queue management support. */
  1. #ifndef __NET_SCHED_RED_H
  2. #define __NET_SCHED_RED_H
  3. #include <linux/config.h>
  4. #include <linux/types.h>
  5. #include <net/pkt_sched.h>
  6. #include <net/inet_ecn.h>
  7. #include <net/dsfield.h>
  8. /* Random Early Detection (RED) algorithm.
  9. =======================================
  10. Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways
  11. for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking.
  12. This file codes a "divisionless" version of RED algorithm
  13. as written down in Fig.17 of the paper.
  14. Short description.
  15. ------------------
  16. When a new packet arrives we calculate the average queue length:
  17. avg = (1-W)*avg + W*current_queue_len,
  18. W is the filter time constant (chosen as 2^(-Wlog)), it controls
  19. the inertia of the algorithm. To allow larger bursts, W should be
  20. decreased.
  21. if (avg > th_max) -> packet marked (dropped).
  22. if (avg < th_min) -> packet passes.
  23. if (th_min < avg < th_max) we calculate probability:
  24. Pb = max_P * (avg - th_min)/(th_max-th_min)
  25. and mark (drop) packet with this probability.
  26. Pb changes from 0 (at avg==th_min) to max_P (avg==th_max).
  27. max_P should be small (not 1), usually 0.01..0.02 is good value.
  28. max_P is chosen as a number, so that max_P/(th_max-th_min)
  29. is a negative power of two in order arithmetics to contain
  30. only shifts.
  31. Parameters, settable by user:
  32. -----------------------------
  33. qth_min - bytes (should be < qth_max/2)
  34. qth_max - bytes (should be at least 2*qth_min and less limit)
  35. Wlog - bits (<32) log(1/W).
  36. Plog - bits (<32)
  37. Plog is related to max_P by formula:
  38. max_P = (qth_max-qth_min)/2^Plog;
  39. F.e. if qth_max=128K and qth_min=32K, then Plog=22
  40. corresponds to max_P=0.02
Scell_log
Stab

	Lookup table for log((1-W)^(t/t_ave)).
  44. NOTES:
  45. Upper bound on W.
  46. -----------------
  47. If you want to allow bursts of L packets of size S,
  48. you should choose W:
  49. L + 1 - th_min/S < (1-(1-W)^L)/W
	th_min/S = 32		th_min/S = 4

	log(W)	L
	-1	33
	-2	35
	-3	39
	-4	46
	-5	57
	-6	75
	-7	101
	-8	135
	-9	190
  61. etc.
  62. */
  63. #define RED_STAB_SIZE 256
  64. #define RED_STAB_MASK (RED_STAB_SIZE - 1)
/*
 * Drop/mark statistics for one RED-managed queue.  Purely bookkeeping:
 * nothing in this header reads these fields, they are presumably
 * updated by the embedding qdisc -- NOTE(review): confirm with callers.
 */
struct red_stats
{
	u32		prob_drop;	/* Early probability drops */
	u32		prob_mark;	/* Early probability marks */
	u32		forced_drop;	/* Forced drops, qavg > max_thresh */
	u32		forced_mark;	/* Forced marks, qavg > max_thresh */
	u32		pdrop;		/* Drops due to queue limits */
	u32		other;		/* Drops due to drop() calls */
	u32		backlog;	/* NOTE(review): maintained by caller, unit unclear from here */
};
/*
 * Complete state of one RED instance: user-supplied parameters (stored
 * pre-scaled for divisionless fixed point arithmetic) plus the running
 * average-queue-length estimator variables.
 */
struct red_parms
{
	/* Parameters */
	u32		qth_min;	/* Min avg length threshold: A scaled (<< Wlog) */
	u32		qth_max;	/* Max avg length threshold: A scaled (<< Wlog) */
	u32		Scell_max;	/* Idle-time clamp: 255 << Scell_log (see red_set_parms) */
	u32		Rmask;		/* Cached random mask, see red_rmask */
	u8		Scell_log;	/* log2 of the idle-time cell used to index Stab */
	u8		Wlog;		/* log(W) */
	u8		Plog;		/* random number bits */
	u8		Stab[RED_STAB_SIZE];	/* per-cell right-shift applied to qavg after idling */

	/* Variables */
	int		qcount;		/* Number of packets since last random
					   number generation */
	u32		qR;		/* Cached random number */

	unsigned long	qavg;		/* Average queue length: A scaled (fixed point at Wlog) */
	psched_time_t	qidlestart;	/* Start of current idle period */
};
  93. static inline u32 red_rmask(u8 Plog)
  94. {
  95. return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
  96. }
  97. static inline void red_set_parms(struct red_parms *p,
  98. u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
  99. u8 Scell_log, u8 *stab)
  100. {
  101. /* Reset average queue length, the value is strictly bound
  102. * to the parameters below, reseting hurts a bit but leaving
  103. * it might result in an unreasonable qavg for a while. --TGR
  104. */
  105. p->qavg = 0;
  106. p->qcount = -1;
  107. p->qth_min = qth_min << Wlog;
  108. p->qth_max = qth_max << Wlog;
  109. p->Wlog = Wlog;
  110. p->Plog = Plog;
  111. p->Rmask = red_rmask(Plog);
  112. p->Scell_log = Scell_log;
  113. p->Scell_max = (255 << Scell_log);
  114. memcpy(p->Stab, stab, sizeof(p->Stab));
  115. }
  116. static inline int red_is_idling(struct red_parms *p)
  117. {
  118. return !PSCHED_IS_PASTPERFECT(p->qidlestart);
  119. }
/* Stamp the current time as the start of an idle period; it is read
 * back by red_calc_qavg_from_idle_time() to decay qavg.
 */
static inline void red_start_of_idle_period(struct red_parms *p)
{
	PSCHED_GET_TIME(p->qidlestart);
}
/* Leave the idle period: set qidlestart back to the "past perfect"
 * sentinel so that red_is_idling() reports false again.
 */
static inline void red_end_of_idle_period(struct red_parms *p)
{
	PSCHED_SET_PASTPERFECT(p->qidlestart);
}
  128. static inline void red_restart(struct red_parms *p)
  129. {
  130. red_end_of_idle_period(p);
  131. p->qavg = 0;
  132. p->qcount = -1;
  133. }
  134. static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
  135. {
  136. psched_time_t now;
  137. long us_idle;
  138. int shift;
  139. PSCHED_GET_TIME(now);
  140. us_idle = PSCHED_TDIFF_SAFE(now, p->qidlestart, p->Scell_max);
  141. /*
  142. * The problem: ideally, average length queue recalcultion should
  143. * be done over constant clock intervals. This is too expensive, so
  144. * that the calculation is driven by outgoing packets.
  145. * When the queue is idle we have to model this clock by hand.
  146. *
  147. * SF+VJ proposed to "generate":
  148. *
  149. * m = idletime / (average_pkt_size / bandwidth)
  150. *
  151. * dummy packets as a burst after idle time, i.e.
  152. *
  153. * p->qavg *= (1-W)^m
  154. *
  155. * This is an apparently overcomplicated solution (f.e. we have to
  156. * precompute a table to make this calculation in reasonable time)
  157. * I believe that a simpler model may be used here,
  158. * but it is field for experiments.
  159. */
  160. shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
  161. if (shift)
  162. return p->qavg >> shift;
  163. else {
  164. /* Approximate initial part of exponent with linear function:
  165. *
  166. * (1-W)^m ~= 1-mW + ...
  167. *
  168. * Seems, it is the best solution to
  169. * problem of too coarse exponent tabulation.
  170. */
  171. us_idle = (p->qavg * us_idle) >> p->Scell_log;
  172. if (us_idle < (p->qavg >> 1))
  173. return p->qavg - us_idle;
  174. else
  175. return p->qavg >> 1;
  176. }
  177. }
  178. static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
  179. unsigned int backlog)
  180. {
  181. /*
  182. * NOTE: p->qavg is fixed point number with point at Wlog.
  183. * The formula below is equvalent to floating point
  184. * version:
  185. *
  186. * qavg = qavg*(1-W) + backlog*W;
  187. *
  188. * --ANK (980924)
  189. */
  190. return p->qavg + (backlog - (p->qavg >> p->Wlog));
  191. }
  192. static inline unsigned long red_calc_qavg(struct red_parms *p,
  193. unsigned int backlog)
  194. {
  195. if (!red_is_idling(p))
  196. return red_calc_qavg_no_idle_time(p, backlog);
  197. else
  198. return red_calc_qavg_from_idle_time(p);
  199. }
  200. static inline u32 red_random(struct red_parms *p)
  201. {
  202. return net_random() & p->Rmask;
  203. }
/*
 * Probabilistic marking decision: returns nonzero when the packet
 * should be marked/dropped, given the current average queue length
 * @qavg (fixed point at Wlog) and the cached random number p->qR.
 */
static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
{
	/* The formula used below causes questions.

	   OK. qR is a random number in the interval 0..Rmask,
	   i.e. 0..(2^Plog). If we used floating point
	   arithmetics, it would be: (2^Plog)*rnd_num,
	   where rnd_num is less than 1.

	   Taking into account that qavg has its fixed
	   point at Wlog, and Plog is related to max_P by
	   max_P = (qth_max-qth_min)/2^Plog, the line
	   below has the following floating point equivalent:

	   max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount

	   Any questions? --ANK (980924)
	 */
	return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR);
}
/* Result of red_cmp_thresh(): position of qavg relative to the two
 * thresholds.  NOTE(review): the "TRESH" spelling is a historical typo,
 * kept because external callers reference these identifiers.
 */
enum {
	RED_BELOW_MIN_THRESH,
	RED_BETWEEN_TRESH,
	RED_ABOVE_MAX_TRESH,
};
  225. static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg)
  226. {
  227. if (qavg < p->qth_min)
  228. return RED_BELOW_MIN_THRESH;
  229. else if (qavg >= p->qth_max)
  230. return RED_ABOVE_MAX_TRESH;
  231. else
  232. return RED_BETWEEN_TRESH;
  233. }
/* Verdict of red_action(): pass the packet unmarked, mark/drop it with
 * probability (between thresholds), or mark/drop it unconditionally
 * (average above max threshold).
 */
enum {
	RED_DONT_MARK,
	RED_PROB_MARK,
	RED_HARD_MARK,
};
  239. static inline int red_action(struct red_parms *p, unsigned long qavg)
  240. {
  241. switch (red_cmp_thresh(p, qavg)) {
  242. case RED_BELOW_MIN_THRESH:
  243. p->qcount = -1;
  244. return RED_DONT_MARK;
  245. case RED_BETWEEN_TRESH:
  246. if (++p->qcount) {
  247. if (red_mark_probability(p, qavg)) {
  248. p->qcount = 0;
  249. p->qR = red_random(p);
  250. return RED_PROB_MARK;
  251. }
  252. } else
  253. p->qR = red_random(p);
  254. return RED_DONT_MARK;
  255. case RED_ABOVE_MAX_TRESH:
  256. p->qcount = -1;
  257. return RED_HARD_MARK;
  258. }
  259. BUG();
  260. return RED_DONT_MARK;
  261. }
  262. #endif