/* net/ipv4/tcp_htcp.c */
/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N. Shorten, D.J. Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */
  8. #include <linux/config.h>
  9. #include <linux/mm.h>
  10. #include <linux/module.h>
  11. #include <net/tcp.h>
  12. #define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */
  13. #define BETA_MIN (1<<6) /* 0.5 with shift << 7 */
  14. #define BETA_MAX 102 /* 0.8 with shift << 7 */
  15. static int use_rtt_scaling = 1;
  16. module_param(use_rtt_scaling, int, 0644);
  17. MODULE_PARM_DESC(use_rtt_scaling, "turn on/off RTT scaling");
  18. static int use_bandwidth_switch = 1;
  19. module_param(use_bandwidth_switch, int, 0644);
  20. MODULE_PARM_DESC(use_bandwidth_switch, "turn on/off bandwidth switcher");
  21. struct htcp {
  22. u16 alpha; /* Fixed point arith, << 7 */
  23. u8 beta; /* Fixed point arith, << 7 */
  24. u8 modeswitch; /* Delay modeswitch until we had at least one congestion event */
  25. u8 ccount; /* Number of RTTs since last congestion event */
  26. u8 undo_ccount;
  27. u16 pkts_acked;
  28. u32 packetcount;
  29. u32 minRTT;
  30. u32 maxRTT;
  31. u32 snd_cwnd_cnt2;
  32. u32 undo_maxRTT;
  33. u32 undo_old_maxB;
  34. /* Bandwidth estimation */
  35. u32 minB;
  36. u32 maxB;
  37. u32 old_maxB;
  38. u32 Bi;
  39. u32 lasttime;
  40. };
  41. static inline void htcp_reset(struct htcp *ca)
  42. {
  43. ca->undo_ccount = ca->ccount;
  44. ca->undo_maxRTT = ca->maxRTT;
  45. ca->undo_old_maxB = ca->old_maxB;
  46. ca->ccount = 0;
  47. ca->snd_cwnd_cnt2 = 0;
  48. }
  49. static u32 htcp_cwnd_undo(struct sock *sk)
  50. {
  51. const struct tcp_sock *tp = tcp_sk(sk);
  52. struct htcp *ca = inet_csk_ca(sk);
  53. ca->ccount = ca->undo_ccount;
  54. ca->maxRTT = ca->undo_maxRTT;
  55. ca->old_maxB = ca->undo_old_maxB;
  56. return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta);
  57. }
  58. static inline void measure_rtt(struct sock *sk)
  59. {
  60. const struct inet_connection_sock *icsk = inet_csk(sk);
  61. const struct tcp_sock *tp = tcp_sk(sk);
  62. struct htcp *ca = inet_csk_ca(sk);
  63. u32 srtt = tp->srtt>>3;
  64. /* keep track of minimum RTT seen so far, minRTT is zero at first */
  65. if (ca->minRTT > srtt || !ca->minRTT)
  66. ca->minRTT = srtt;
  67. /* max RTT */
  68. if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) {
  69. if (ca->maxRTT < ca->minRTT)
  70. ca->maxRTT = ca->minRTT;
  71. if (ca->maxRTT < srtt && srtt <= ca->maxRTT+msecs_to_jiffies(20))
  72. ca->maxRTT = srtt;
  73. }
  74. }
  75. static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
  76. {
  77. const struct inet_connection_sock *icsk = inet_csk(sk);
  78. const struct tcp_sock *tp = tcp_sk(sk);
  79. struct htcp *ca = inet_csk_ca(sk);
  80. u32 now = tcp_time_stamp;
  81. if (icsk->icsk_ca_state == TCP_CA_Open)
  82. ca->pkts_acked = pkts_acked;
  83. if (!use_bandwidth_switch)
  84. return;
  85. /* achieved throughput calculations */
  86. if (icsk->icsk_ca_state != TCP_CA_Open &&
  87. icsk->icsk_ca_state != TCP_CA_Disorder) {
  88. ca->packetcount = 0;
  89. ca->lasttime = now;
  90. return;
  91. }
  92. ca->packetcount += pkts_acked;
  93. if (ca->packetcount >= tp->snd_cwnd - (ca->alpha>>7? : 1)
  94. && now - ca->lasttime >= ca->minRTT
  95. && ca->minRTT > 0) {
  96. __u32 cur_Bi = ca->packetcount*HZ/(now - ca->lasttime);
  97. if (ca->ccount <= 3) {
  98. /* just after backoff */
  99. ca->minB = ca->maxB = ca->Bi = cur_Bi;
  100. } else {
  101. ca->Bi = (3*ca->Bi + cur_Bi)/4;
  102. if (ca->Bi > ca->maxB)
  103. ca->maxB = ca->Bi;
  104. if (ca->minB > ca->maxB)
  105. ca->minB = ca->maxB;
  106. }
  107. ca->packetcount = 0;
  108. ca->lasttime = now;
  109. }
  110. }
  111. static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
  112. {
  113. if (use_bandwidth_switch) {
  114. u32 maxB = ca->maxB;
  115. u32 old_maxB = ca->old_maxB;
  116. ca->old_maxB = ca->maxB;
  117. if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) {
  118. ca->beta = BETA_MIN;
  119. ca->modeswitch = 0;
  120. return;
  121. }
  122. }
  123. if (ca->modeswitch && minRTT > msecs_to_jiffies(10) && maxRTT) {
  124. ca->beta = (minRTT<<7)/maxRTT;
  125. if (ca->beta < BETA_MIN)
  126. ca->beta = BETA_MIN;
  127. else if (ca->beta > BETA_MAX)
  128. ca->beta = BETA_MAX;
  129. } else {
  130. ca->beta = BETA_MIN;
  131. ca->modeswitch = 1;
  132. }
  133. }
  134. static inline void htcp_alpha_update(struct htcp *ca)
  135. {
  136. u32 minRTT = ca->minRTT;
  137. u32 factor = 1;
  138. u32 diff = ca->ccount * minRTT; /* time since last backoff */
  139. if (diff > HZ) {
  140. diff -= HZ;
  141. factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/HZ) )/HZ;
  142. }
  143. if (use_rtt_scaling && minRTT) {
  144. u32 scale = (HZ<<3)/(10*minRTT);
  145. scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */
  146. factor = (factor<<3)/scale;
  147. if (!factor)
  148. factor = 1;
  149. }
  150. ca->alpha = 2*factor*((1<<7)-ca->beta);
  151. if (!ca->alpha)
  152. ca->alpha = ALPHA_BASE;
  153. }
  154. /* After we have the rtt data to calculate beta, we'd still prefer to wait one
  155. * rtt before we adjust our beta to ensure we are working from a consistent
  156. * data.
  157. *
  158. * This function should be called when we hit a congestion event since only at
  159. * that point do we really have a real sense of maxRTT (the queues en route
  160. * were getting just too full now).
  161. */
  162. static void htcp_param_update(struct sock *sk)
  163. {
  164. struct htcp *ca = inet_csk_ca(sk);
  165. u32 minRTT = ca->minRTT;
  166. u32 maxRTT = ca->maxRTT;
  167. htcp_beta_update(ca, minRTT, maxRTT);
  168. htcp_alpha_update(ca);
  169. /* add slowly fading memory for maxRTT to accommodate routing changes etc */
  170. if (minRTT > 0 && maxRTT > minRTT)
  171. ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
  172. }
  173. static u32 htcp_recalc_ssthresh(struct sock *sk)
  174. {
  175. const struct tcp_sock *tp = tcp_sk(sk);
  176. const struct htcp *ca = inet_csk_ca(sk);
  177. htcp_param_update(sk);
  178. return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
  179. }
  180. static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  181. u32 in_flight, int data_acked)
  182. {
  183. struct tcp_sock *tp = tcp_sk(sk);
  184. struct htcp *ca = inet_csk_ca(sk);
  185. if (!tcp_is_cwnd_limited(sk, in_flight))
  186. return;
  187. if (tp->snd_cwnd <= tp->snd_ssthresh)
  188. tcp_slow_start(tp);
  189. else {
  190. measure_rtt(sk);
  191. /* keep track of number of round-trip times since last backoff event */
  192. if (ca->snd_cwnd_cnt2 >= tp->snd_cwnd) {
  193. ca->ccount++;
  194. ca->snd_cwnd_cnt2 -= tp->snd_cwnd;
  195. htcp_alpha_update(ca);
  196. } else
  197. ca->snd_cwnd_cnt2 += ca->pkts_acked;
  198. /* In dangerous area, increase slowly.
  199. * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
  200. */
  201. if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
  202. if (tp->snd_cwnd < tp->snd_cwnd_clamp)
  203. tp->snd_cwnd++;
  204. tp->snd_cwnd_cnt = 0;
  205. } else
  206. tp->snd_cwnd_cnt += ca->pkts_acked;
  207. ca->pkts_acked = 1;
  208. }
  209. }
  210. /* Lower bound on congestion window. */
  211. static u32 htcp_min_cwnd(struct sock *sk)
  212. {
  213. const struct tcp_sock *tp = tcp_sk(sk);
  214. return tp->snd_ssthresh;
  215. }
  216. static void htcp_init(struct sock *sk)
  217. {
  218. struct htcp *ca = inet_csk_ca(sk);
  219. memset(ca, 0, sizeof(struct htcp));
  220. ca->alpha = ALPHA_BASE;
  221. ca->beta = BETA_MIN;
  222. ca->pkts_acked = 1;
  223. }
  224. static void htcp_state(struct sock *sk, u8 new_state)
  225. {
  226. switch (new_state) {
  227. case TCP_CA_CWR:
  228. case TCP_CA_Recovery:
  229. case TCP_CA_Loss:
  230. htcp_reset(inet_csk_ca(sk));
  231. break;
  232. }
  233. }
  234. static struct tcp_congestion_ops htcp = {
  235. .init = htcp_init,
  236. .ssthresh = htcp_recalc_ssthresh,
  237. .min_cwnd = htcp_min_cwnd,
  238. .cong_avoid = htcp_cong_avoid,
  239. .set_state = htcp_state,
  240. .undo_cwnd = htcp_cwnd_undo,
  241. .pkts_acked = measure_achieved_throughput,
  242. .owner = THIS_MODULE,
  243. .name = "htcp",
  244. };
  245. static int __init htcp_register(void)
  246. {
  247. BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE);
  248. BUILD_BUG_ON(BETA_MIN >= BETA_MAX);
  249. return tcp_register_congestion_control(&htcp);
  250. }
  251. static void __exit htcp_unregister(void)
  252. {
  253. tcp_unregister_congestion_control(&htcp);
  254. }
  255. module_init(htcp_register);
  256. module_exit(htcp_unregister);
  257. MODULE_AUTHOR("Baruch Even");
  258. MODULE_LICENSE("GPL");
  259. MODULE_DESCRIPTION("H-TCP");