/* tcp_compound.c */
  1. /*
  2. * TCP Vegas congestion control
  3. *
  4. * This is based on the congestion detection/avoidance scheme described in
  5. * Lawrence S. Brakmo and Larry L. Peterson.
  6. * "TCP Vegas: End to end congestion avoidance on a global internet."
  7. * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480,
  8. * October 1995. Available from:
  9. * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
  10. *
  11. * See http://www.cs.arizona.edu/xkernel/ for their implementation.
  12. * The main aspects that distinguish this implementation from the
  13. * Arizona Vegas implementation are:
  14. * o We do not change the loss detection or recovery mechanisms of
  15. * Linux in any way. Linux already recovers from losses quite well,
  16. * using fine-grained timers, NewReno, and FACK.
  17. * o To avoid the performance penalty imposed by increasing cwnd
  18. * only every-other RTT during slow start, we increase during
  19. * every RTT during slow start, just like Reno.
  20. * o Largely to allow continuous cwnd growth during slow start,
  21. * we use the rate at which ACKs come back as the "actual"
  22. * rate, rather than the rate at which data is sent.
  23. * o To speed convergence to the right rate, we set the cwnd
  24. * to achieve the right ("actual") rate when we exit slow start.
  25. * o To filter out the noise caused by delayed ACKs, we use the
  26. * minimum RTT sample observed during the last RTT to calculate
  27. * the actual rate.
  28. * o When the sender re-starts from idle, it waits until it has
  29. * received ACKs for an entire flight of new data before making
  30. * a cwnd adjustment decision. The original Vegas implementation
  31. * assumed senders never went idle.
  32. *
  33. *
  34. * TCP Compound based on TCP Vegas
  35. *
  36. * further details can be found here:
  37. * ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
  38. */
  39. #include <linux/config.h>
  40. #include <linux/mm.h>
  41. #include <linux/module.h>
  42. #include <linux/skbuff.h>
  43. #include <linux/inet_diag.h>
  44. #include <net/tcp.h>
/* Default values of the Vegas variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1

/* alpha exponent for the delay-based window: dwnd gain is applied as a
 * right shift by ALPHA (i.e. multiply by 1/8), see tcp_compound_cong_avoid().
 */
#define TCP_COMPOUND_ALPHA          3U
/* beta: multiplier on "diff" when shrinking dwnd in response to queuing. */
#define TCP_COMPOUND_BETA           1U
/* kappa is realized as w^(KAPPA_POW) followed by KAPPA_NSQRT successive
 * integer square roots, i.e. w^(3/4) with the values below.
 */
#define TCP_COMPOUND_KAPPA_POW      3
#define TCP_COMPOUND_KAPPA_NSQRT    2
/* gamma: queuing threshold (in fixed-point segments) separating the
 * "network underutilized, grow dwnd" and "queue building, shrink dwnd" regimes.
 */
#define TCP_COMPOUND_GAMMA         30
/* NOTE(review): ZETA is defined but not referenced anywhere in this file. */
#define TCP_COMPOUND_ZETA           1
/* TCP compound variables.
 *
 * Per-connection private state; must fit in ICSK_CA_PRIV_SIZE
 * (checked at module init). Do not grow or reorder casually.
 */
struct compound {
	u32 beg_snd_nxt;	/* right edge during last RTT */
	u32 beg_snd_una;	/* left edge during last RTT */
	u32 beg_snd_cwnd;	/* saves the size of the cwnd */

	u8 doing_vegas_now;	/* if true, do vegas for this RTT */

	u16 cntRTT;		/* # of RTTs measured within last RTT */
	u32 minRTT;		/* min of RTTs measured within last RTT (in usec) */
	u32 baseRTT;		/* the min of all Vegas RTT measurements seen (in usec) */

	u32 cwnd;		/* loss-based (Reno-like) component of snd_cwnd */
	u32 dwnd;		/* delay-based component; snd_cwnd = cwnd + dwnd */
};
  67. /* There are several situations when we must "re-start" Vegas:
  68. *
  69. * o when a connection is established
  70. * o after an RTO
  71. * o after fast recovery
  72. * o when we send a packet and there is no outstanding
  73. * unacknowledged data (restarting an idle connection)
  74. *
  75. * In these circumstances we cannot do a Vegas calculation at the
  76. * end of the first RTT, because any calculation we do is using
  77. * stale info -- both the saved cwnd and congestion feedback are
  78. * stale.
  79. *
  80. * Instead we must wait until the completion of an RTT during
  81. * which we actually receive ACKs.
  82. */
  83. static inline void vegas_enable(struct sock *sk)
  84. {
  85. const struct tcp_sock *tp = tcp_sk(sk);
  86. struct compound *vegas = inet_csk_ca(sk);
  87. /* Begin taking Vegas samples next time we send something. */
  88. vegas->doing_vegas_now = 1;
  89. /* Set the beginning of the next send window. */
  90. vegas->beg_snd_nxt = tp->snd_nxt;
  91. vegas->cntRTT = 0;
  92. vegas->minRTT = 0x7fffffff;
  93. }
  94. /* Stop taking Vegas samples for now. */
  95. static inline void vegas_disable(struct sock *sk)
  96. {
  97. struct compound *vegas = inet_csk_ca(sk);
  98. vegas->doing_vegas_now = 0;
  99. }
  100. static void tcp_compound_init(struct sock *sk)
  101. {
  102. struct compound *vegas = inet_csk_ca(sk);
  103. const struct tcp_sock *tp = tcp_sk(sk);
  104. vegas->baseRTT = 0x7fffffff;
  105. vegas_enable(sk);
  106. vegas->dwnd = 0;
  107. vegas->cwnd = tp->snd_cwnd;
  108. }
  109. /* Do RTT sampling needed for Vegas.
  110. * Basically we:
  111. * o min-filter RTT samples from within an RTT to get the current
  112. * propagation delay + queuing delay (we are min-filtering to try to
  113. * avoid the effects of delayed ACKs)
  114. * o min-filter RTT samples from a much longer window (forever for now)
  115. * to find the propagation delay (baseRTT)
  116. */
  117. static void tcp_compound_rtt_calc(struct sock *sk, u32 usrtt)
  118. {
  119. struct compound *vegas = inet_csk_ca(sk);
  120. u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */
  121. /* Filter to find propagation delay: */
  122. if (vrtt < vegas->baseRTT)
  123. vegas->baseRTT = vrtt;
  124. /* Find the min RTT during the last RTT to find
  125. * the current prop. delay + queuing delay:
  126. */
  127. vegas->minRTT = min(vegas->minRTT, vrtt);
  128. vegas->cntRTT++;
  129. }
  130. static void tcp_compound_state(struct sock *sk, u8 ca_state)
  131. {
  132. if (ca_state == TCP_CA_Open)
  133. vegas_enable(sk);
  134. else
  135. vegas_disable(sk);
  136. }
  137. /*
  138. * If the connection is idle and we are restarting,
  139. * then we don't want to do any Vegas calculations
  140. * until we get fresh RTT samples. So when we
  141. * restart, we reset our Vegas state to a clean
  142. * slate. After we get acks for this flight of
  143. * packets, _then_ we can make Vegas calculations
  144. * again.
  145. */
  146. static void tcp_compound_cwnd_event(struct sock *sk, enum tcp_ca_event event)
  147. {
  148. if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
  149. tcp_compound_init(sk);
  150. }
/* Compound congestion avoidance: snd_cwnd is maintained as the sum of a
 * loss-based component (vegas->cwnd, grown Reno-style below) and a
 * delay-based component (vegas->dwnd, adjusted once per RTT from the
 * Vegas-style RTT measurements).
 *
 * NOTE(review): unlike tcp_vegas, this function never consults
 * vegas->doing_vegas_now before using the RTT samples — confirm this is
 * intentional.
 */
static void tcp_compound_cong_avoid(struct sock *sk, u32 ack,
				    u32 seq_rtt, u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct compound *vegas = inet_csk_ca(sk);
	u8 inc = 0;

	/* Re-sync our two components with snd_cwnd in case it was reduced
	 * elsewhere (e.g. by loss recovery): shrink cwnd/dwnd so that
	 * cwnd + dwnd == snd_cwnd again, dropping dwnd first.
	 */
	if (vegas->cwnd + vegas->dwnd > tp->snd_cwnd) {
		if (vegas->cwnd > tp->snd_cwnd || vegas->dwnd > tp->snd_cwnd) {
			vegas->cwnd = tp->snd_cwnd;
			vegas->dwnd = 0;
		} else
			vegas->cwnd = tp->snd_cwnd - vegas->dwnd;

	}

	/* Only grow the window while we are actually cwnd-limited. */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* Reno-style growth of the loss-based component: slow start while
	 * below ssthresh, otherwise one segment per snd_cwnd ACKs.
	 */
	if (vegas->cwnd <= tp->snd_ssthresh)
		inc = 1;
	else if (tp->snd_cwnd_cnt < tp->snd_cwnd)
		tp->snd_cwnd_cnt++;

	if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		inc = 1;
		tp->snd_cwnd_cnt = 0;
	}

	if (inc && tp->snd_cwnd < tp->snd_cwnd_clamp)
		vegas->cwnd++;

	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up nicely with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, vegas->beg_snd_nxt)) {
		/* Do the Vegas once-per-RTT cwnd adjustment. */
		u32 old_wnd, old_snd_cwnd;

		/* Here old_wnd is essentially the window of data that was
		 * sent during the previous RTT, and has all
		 * been acknowledged in the course of the RTT that ended
		 * with the ACK we just received.  Likewise, old_snd_cwnd
		 * is the cwnd during the previous RTT.
		 */
		if (!tp->mss_cache)
			return;

		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
		    tp->mss_cache;
		old_snd_cwnd = vegas->beg_snd_cwnd;

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		vegas->beg_snd_una = vegas->beg_snd_nxt;
		vegas->beg_snd_nxt = tp->snd_nxt;
		vegas->beg_snd_cwnd = tp->snd_cwnd;

		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If  we have 3 samples, we should be OK.
		 */

		if (vegas->cntRTT > 2) {
			u32 rtt, target_cwnd, diff;
			u32 brtt, dwnd;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = vegas->minRTT;

			/* Calculate the cwnd we should have, if we weren't
			 * going too fast.
			 *
			 * This is:
			 *     (actual rate in segments) * baseRTT
			 * We keep it as a fixed point number with
			 * V_PARAM_SHIFT bits to the right of the binary point.
			 */
			if (!rtt)
				return;

			brtt = vegas->baseRTT;
			target_cwnd = ((old_wnd * brtt)
				       << V_PARAM_SHIFT) / rtt;

			/* Calculate the difference between the window we had,
			 * and the window we would like to have. This quantity
			 * is the "Diff" from the Arizona Vegas papers.
			 *
			 * Again, this is a fixed point number with
			 * V_PARAM_SHIFT bits to the right of the binary
			 * point.
			 *
			 * NOTE(review): diff is u32; if target_cwnd exceeds
			 * old_wnd << V_PARAM_SHIFT (possible when rtt < brtt
			 * cannot occur, but rounding makes equality cases
			 * tight) the subtraction wraps — verify this is
			 * impossible or intended.
			 */
			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;

			dwnd = vegas->dwnd;

			if (diff < (TCP_COMPOUND_GAMMA << V_PARAM_SHIFT)) {
				/* Little queuing: grow dwnd by
				 * (old_wnd^(3/4)) >> ALPHA - 1, computed with
				 * integer Newton iterations for the square
				 * roots (KAPPA_POW = 3, KAPPA_NSQRT = 2).
				 */
				u32 i, j, x, x2;
				u64 v;

				v = 1;

				for (i = 0; i < TCP_COMPOUND_KAPPA_POW; i++)
					v *= old_wnd;

				for (i = 0; i < TCP_COMPOUND_KAPPA_NSQRT; i++) {
					x = 1;
					for (j = 0; j < 200; j++) {
						x2 = (x + v / x) / 2;

						if (x2 == x || !x2)
							break;

						x = x2;
					}
					v = x;
				}

				x = (u32) v >> TCP_COMPOUND_ALPHA;

				if (x > 1)
					dwnd = x - 1;
				else
					dwnd = 0;

				dwnd += vegas->dwnd;

			} else if ((dwnd << V_PARAM_SHIFT) <
				   (diff * TCP_COMPOUND_BETA))
				/* Queue has built up past what dwnd can
				 * absorb: give up the delay window entirely.
				 */
				dwnd = 0;
			else
				/* Shrink dwnd by beta * diff (fixed point). */
				dwnd =
				    ((dwnd << V_PARAM_SHIFT) -
				     (diff *
				      TCP_COMPOUND_BETA)) >> V_PARAM_SHIFT;

			vegas->dwnd = dwnd;

		}

		/* Wipe the slate clean for the next RTT. */
		vegas->cntRTT = 0;
		vegas->minRTT = 0x7fffffff;
	}

	/* Publish the combined window. */
	tp->snd_cwnd = vegas->cwnd + vegas->dwnd;
}
/* Extract info for Tcp socket info provided via netlink (inet_diag).
 * Reports the Vegas-style sampling state in a struct tcpvegas_info.
 */
static void tcp_compound_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
	const struct compound *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info *info;

		/* __RTA_PUT() jumps to the rtattr_failure label below if
		 * the skb has insufficient tailroom — do not remove it.
		 */
		info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
					  sizeof(*info)));

		info->tcpv_enabled = ca->doing_vegas_now;
		info->tcpv_rttcnt = ca->cntRTT;
		info->tcpv_rtt = ca->baseRTT;
		info->tcpv_minrtt = ca->minRTT;
	rtattr_failure:;
	}
}
/* Congestion-control hook table registered with the TCP stack.
 * Slow start threshold and min cwnd fall back to the Reno helpers;
 * everything else is implemented above.
 */
static struct tcp_congestion_ops tcp_compound = {
	.init		= tcp_compound_init,
	.ssthresh	= tcp_reno_ssthresh,	/* standard Reno halving */
	.cong_avoid	= tcp_compound_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
	.rtt_sample	= tcp_compound_rtt_calc,
	.set_state	= tcp_compound_state,
	.cwnd_event	= tcp_compound_cwnd_event,
	.get_info	= tcp_compound_get_info,

	.owner		= THIS_MODULE,
	.name		= "compound",
};
  327. static int __init tcp_compound_register(void)
  328. {
  329. BUG_ON(sizeof(struct compound) > ICSK_CA_PRIV_SIZE);
  330. tcp_register_congestion_control(&tcp_compound);
  331. return 0;
  332. }
/* Module exit point: remove the algorithm from the CC registry. */
static void __exit tcp_compound_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_compound);
}
/* Standard module wiring and metadata. */
module_init(tcp_compound_register);
module_exit(tcp_compound_unregister);

MODULE_AUTHOR("Angelo P. Castellani, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Compound");