/* tcp_compound.c */
  1. /*
  2. * TCP Vegas congestion control
  3. *
  4. * This is based on the congestion detection/avoidance scheme described in
  5. * Lawrence S. Brakmo and Larry L. Peterson.
  6. * "TCP Vegas: End to end congestion avoidance on a global internet."
  7. * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480,
  8. * October 1995. Available from:
  9. * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps
  10. *
  11. * See http://www.cs.arizona.edu/xkernel/ for their implementation.
  12. * The main aspects that distinguish this implementation from the
  13. * Arizona Vegas implementation are:
  14. * o We do not change the loss detection or recovery mechanisms of
  15. * Linux in any way. Linux already recovers from losses quite well,
  16. * using fine-grained timers, NewReno, and FACK.
  17. * o To avoid the performance penalty imposed by increasing cwnd
  18. * only every-other RTT during slow start, we increase during
  19. * every RTT during slow start, just like Reno.
  20. * o Largely to allow continuous cwnd growth during slow start,
  21. * we use the rate at which ACKs come back as the "actual"
  22. * rate, rather than the rate at which data is sent.
  23. * o To speed convergence to the right rate, we set the cwnd
  24. * to achieve the right ("actual") rate when we exit slow start.
  25. * o To filter out the noise caused by delayed ACKs, we use the
  26. * minimum RTT sample observed during the last RTT to calculate
  27. * the actual rate.
  28. * o When the sender re-starts from idle, it waits until it has
  29. * received ACKs for an entire flight of new data before making
  30. * a cwnd adjustment decision. The original Vegas implementation
  31. * assumed senders never went idle.
  32. *
  33. *
  34. * TCP Compound based on TCP Vegas
  35. *
  36. * further details can be found here:
  37. * ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf
  38. */
  39. #include <linux/config.h>
  40. #include <linux/mm.h>
  41. #include <linux/module.h>
  42. #include <linux/skbuff.h>
  43. #include <linux/inet_diag.h>
  44. #include <net/tcp.h>
  45. /* Default values of the Vegas variables, in fixed-point representation
  46. * with V_PARAM_SHIFT bits to the right of the binary point.
  47. */
  48. #define V_PARAM_SHIFT 1
  49. #define TCP_COMPOUND_ALPHA 3U
  50. #define TCP_COMPOUND_BETA 1U
  51. #define TCP_COMPOUND_GAMMA 30
  52. #define TCP_COMPOUND_ZETA 1
/* TCP compound variables
 *
 * Per-connection private state, stored in the socket's inet_csk_ca()
 * scratch area (must fit in ICSK_CA_PRIV_SIZE — see tcp_compound_register()).
 */
struct compound {
	u32 beg_snd_nxt;	/* right edge during last RTT */
	u32 beg_snd_una;	/* left edge during last RTT */
	u32 beg_snd_cwnd;	/* saves the size of the cwnd */
	u8 doing_vegas_now;	/* if true, do vegas for this RTT */
	u16 cntRTT;		/* # of RTTs measured within last RTT */
	u32 minRTT;		/* min of RTTs measured within last RTT (in usec) */
	u32 baseRTT;		/* the min of all Vegas RTT measurements seen (in usec) */
	u32 cwnd;		/* loss-based window component (Reno-style growth) */
	u32 dwnd;		/* delay-based window component; snd_cwnd = cwnd + dwnd */
};
  65. /* There are several situations when we must "re-start" Vegas:
  66. *
  67. * o when a connection is established
  68. * o after an RTO
  69. * o after fast recovery
  70. * o when we send a packet and there is no outstanding
  71. * unacknowledged data (restarting an idle connection)
  72. *
  73. * In these circumstances we cannot do a Vegas calculation at the
  74. * end of the first RTT, because any calculation we do is using
  75. * stale info -- both the saved cwnd and congestion feedback are
  76. * stale.
  77. *
  78. * Instead we must wait until the completion of an RTT during
  79. * which we actually receive ACKs.
  80. */
  81. static inline void vegas_enable(struct sock *sk)
  82. {
  83. const struct tcp_sock *tp = tcp_sk(sk);
  84. struct compound *vegas = inet_csk_ca(sk);
  85. /* Begin taking Vegas samples next time we send something. */
  86. vegas->doing_vegas_now = 1;
  87. /* Set the beginning of the next send window. */
  88. vegas->beg_snd_nxt = tp->snd_nxt;
  89. vegas->cntRTT = 0;
  90. vegas->minRTT = 0x7fffffff;
  91. }
  92. /* Stop taking Vegas samples for now. */
  93. static inline void vegas_disable(struct sock *sk)
  94. {
  95. struct compound *vegas = inet_csk_ca(sk);
  96. vegas->doing_vegas_now = 0;
  97. }
  98. static void tcp_compound_init(struct sock *sk)
  99. {
  100. struct compound *vegas = inet_csk_ca(sk);
  101. const struct tcp_sock *tp = tcp_sk(sk);
  102. vegas->baseRTT = 0x7fffffff;
  103. vegas_enable(sk);
  104. vegas->dwnd = 0;
  105. vegas->cwnd = tp->snd_cwnd;
  106. }
  107. /* Do RTT sampling needed for Vegas.
  108. * Basically we:
  109. * o min-filter RTT samples from within an RTT to get the current
  110. * propagation delay + queuing delay (we are min-filtering to try to
  111. * avoid the effects of delayed ACKs)
  112. * o min-filter RTT samples from a much longer window (forever for now)
  113. * to find the propagation delay (baseRTT)
  114. */
  115. static void tcp_compound_rtt_calc(struct sock *sk, u32 usrtt)
  116. {
  117. struct compound *vegas = inet_csk_ca(sk);
  118. u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */
  119. /* Filter to find propagation delay: */
  120. if (vrtt < vegas->baseRTT)
  121. vegas->baseRTT = vrtt;
  122. /* Find the min RTT during the last RTT to find
  123. * the current prop. delay + queuing delay:
  124. */
  125. vegas->minRTT = min(vegas->minRTT, vrtt);
  126. vegas->cntRTT++;
  127. }
  128. static void tcp_compound_state(struct sock *sk, u8 ca_state)
  129. {
  130. if (ca_state == TCP_CA_Open)
  131. vegas_enable(sk);
  132. else
  133. vegas_disable(sk);
  134. }
  135. /* 64bit divisor, dividend and result. dynamic precision */
  136. static inline u64 div64_64(u64 dividend, u64 divisor)
  137. {
  138. u32 d = divisor;
  139. if (divisor > 0xffffffffULL) {
  140. unsigned int shift = fls(divisor >> 32);
  141. d = divisor >> shift;
  142. dividend >>= shift;
  143. }
  144. /* avoid 64 bit division if possible */
  145. if (dividend >> 32)
  146. do_div(dividend, d);
  147. else
  148. dividend = (u32) dividend / d;
  149. return dividend;
  150. }
/* calculate the quartic root of "a" using Newton-Raphson */
static u32 qroot(u64 a)
{
	u32 x, x1;

	/* Initial estimate is based on:
	 * qrt(x) = exp(log(x) / 4)
	 *
	 * fls64(a) approximates log2(a)+1, so this seeds x near 2^(log2(a)/4).
	 */
	x = 1u << (fls64(a) >> 2);

	/*
	 * Iteration based on:
	 *                        3
	 * x    = ( 3 * x + a / x  ) / 4
	 *  k+1         k        k
	 */
	do {
		u64 x3 = x;

		x1 = x;		/* remember the previous estimate */
		x3 *= x;
		x3 *= x;	/* x3 = x^3, computed in 64 bits to avoid overflow */

		x = (3 * x + (u32) div64_64(a, x3)) / 4;
	} while (abs(x1 - x) > 1);	/* converged once estimates differ by <= 1.
					 * NOTE(review): x1 - x is unsigned, so a
					 * "negative" step wraps to a large value and
					 * keeps iterating — relies on the kernel abs()
					 * macro + convergence; confirm intended. */

	return x;
}
  174. /*
  175. * If the connection is idle and we are restarting,
  176. * then we don't want to do any Vegas calculations
  177. * until we get fresh RTT samples. So when we
  178. * restart, we reset our Vegas state to a clean
  179. * slate. After we get acks for this flight of
  180. * packets, _then_ we can make Vegas calculations
  181. * again.
  182. */
  183. static void tcp_compound_cwnd_event(struct sock *sk, enum tcp_ca_event event)
  184. {
  185. if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
  186. tcp_compound_init(sk);
  187. }
/* Per-ACK congestion avoidance.
 *
 * Grows the loss-based component (vegas->cwnd) Reno-style on every ACK,
 * and once per RTT adjusts the delay-based component (vegas->dwnd) from
 * the measured queuing delay.  tp->snd_cwnd is maintained as the sum of
 * the two components (see the final statement).
 * seq_rtt and flag are part of the cong_avoid hook signature but unused.
 */
static void tcp_compound_cong_avoid(struct sock *sk, u32 ack,
				    u32 seq_rtt, u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct compound *vegas = inet_csk_ca(sk);
	u8 inc = 0;

	/* Re-sync our two components with snd_cwnd in case it was reduced
	 * elsewhere (e.g. loss recovery): shrink dwnd first, clamp cwnd. */
	if (vegas->cwnd + vegas->dwnd > tp->snd_cwnd) {
		if (vegas->cwnd > tp->snd_cwnd || vegas->dwnd > tp->snd_cwnd) {
			vegas->cwnd = tp->snd_cwnd;
			vegas->dwnd = 0;
		} else
			vegas->cwnd = tp->snd_cwnd - vegas->dwnd;

	}

	/* Nothing to do if the application, not cwnd, is the bottleneck. */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* Reno-style growth of the loss-based component: +1 per ACK in
	 * slow start, +1 per window's worth of ACKs afterwards. */
	if (vegas->cwnd <= tp->snd_ssthresh)
		inc = 1;
	else if (tp->snd_cwnd_cnt < tp->snd_cwnd)
		tp->snd_cwnd_cnt++;

	if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		inc = 1;
		tp->snd_cwnd_cnt = 0;
	}

	if (inc && tp->snd_cwnd < tp->snd_cwnd_clamp)
		vegas->cwnd++;

	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up nicely with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, vegas->beg_snd_nxt)) {
		/* Do the Vegas once-per-RTT cwnd adjustment. */
		u32 old_wnd, old_snd_cwnd;

		/* Here old_wnd is essentially the window of data that was
		 * sent during the previous RTT, and has all
		 * been acknowledged in the course of the RTT that ended
		 * with the ACK we just received. Likewise, old_snd_cwnd
		 * is the cwnd during the previous RTT.
		 */
		if (!tp->mss_cache)	/* guard the division below */
			return;

		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
			tp->mss_cache;
		old_snd_cwnd = vegas->beg_snd_cwnd;

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		vegas->beg_snd_una = vegas->beg_snd_nxt;
		vegas->beg_snd_nxt = tp->snd_nxt;
		vegas->beg_snd_cwnd = tp->snd_cwnd;

		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */
		if (vegas->cntRTT > 2) {
			u32 rtt, target_cwnd, diff;
			u32 brtt, dwnd;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = vegas->minRTT;

			/* Calculate the cwnd we should have, if we weren't
			 * going too fast.
			 *
			 * This is:
			 *     (actual rate in segments) * baseRTT
			 * We keep it as a fixed point number with
			 * V_PARAM_SHIFT bits to the right of the binary point.
			 */
			if (!rtt)	/* no valid sample; skip this round */
				return;

			brtt = vegas->baseRTT;
			target_cwnd = ((old_wnd * brtt)
				       << V_PARAM_SHIFT) / rtt;

			/* Calculate the difference between the window we had,
			 * and the window we would like to have. This quantity
			 * is the "Diff" from the Arizona Vegas papers.
			 *
			 * Again, this is a fixed point number with
			 * V_PARAM_SHIFT bits to the right of the binary
			 * point.
			 */
			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;

			dwnd = vegas->dwnd;

			if (diff < (TCP_COMPOUND_GAMMA << V_PARAM_SHIFT)) {
				u64 v;
				u32 x;

				/*
				 * The TCP Compound paper describes the choice
				 * of "k" determines the aggressiveness,
				 * i.e. slope of the response function.
				 *
				 * For same value as HSTCP would be 0.8
				 * but for computational reasons, both the
				 * original authors and this implementation
				 * use 0.75.
				 *
				 * qroot(w^3) = w^(3/4), so this computes
				 * old_wnd^0.75 scaled down by 2^ALPHA.
				 */
				v = old_wnd;
				x = qroot(v * v * v) >> TCP_COMPOUND_ALPHA;
				if (x > 1)
					dwnd = x - 1;
				else
					dwnd = 0;

				dwnd += vegas->dwnd;

			} else if ((dwnd << V_PARAM_SHIFT) <
				   (diff * TCP_COMPOUND_BETA))
				dwnd = 0;
			else
				/* Queuing detected: back dwnd off by
				 * BETA * diff (fixed-point arithmetic). */
				dwnd =
				    ((dwnd << V_PARAM_SHIFT) -
				     (diff *
				      TCP_COMPOUND_BETA)) >> V_PARAM_SHIFT;

			vegas->dwnd = dwnd;

		}

		/* Wipe the slate clean for the next RTT. */
		vegas->cntRTT = 0;
		vegas->minRTT = 0x7fffffff;
	}

	tp->snd_cwnd = vegas->cwnd + vegas->dwnd;
}
/* Extract info for Tcp socket info provided via netlink (inet_diag). */
static void tcp_compound_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
{
	const struct compound *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info *info;

		/* __RTA_PUT() contains a hidden "goto rtattr_failure" when
		 * the skb has no room, so the label below must remain. */
		info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
					  sizeof(*info)));

		info->tcpv_enabled = ca->doing_vegas_now;
		info->tcpv_rttcnt = ca->cntRTT;
		info->tcpv_rtt = ca->baseRTT;
		info->tcpv_minrtt = ca->minRTT;
	      rtattr_failure:;
	}
}
/* Hook table handed to the TCP congestion-control framework.
 * Loss response reuses Reno's ssthresh; everything else is Compound's.
 */
static struct tcp_congestion_ops tcp_compound = {
	.init = tcp_compound_init,
	.ssthresh = tcp_reno_ssthresh,
	.cong_avoid = tcp_compound_cong_avoid,
	.rtt_sample = tcp_compound_rtt_calc,
	.set_state = tcp_compound_state,
	.cwnd_event = tcp_compound_cwnd_event,
	.get_info = tcp_compound_get_info,

	.owner = THIS_MODULE,
	.name = "compound",
};
  361. static int __init tcp_compound_register(void)
  362. {
  363. BUG_ON(sizeof(struct compound) > ICSK_CA_PRIV_SIZE);
  364. tcp_register_congestion_control(&tcp_compound);
  365. return 0;
  366. }
/* Module exit: remove the algorithm from the congestion-control list. */
static void __exit tcp_compound_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_compound);
}
/* Module entry/exit points and metadata. */
module_init(tcp_compound_register);
module_exit(tcp_compound_unregister);

MODULE_AUTHOR("Angelo P. Castellani, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Compound");