/*
 * TCP Westwood+
 *
 *      Angelo Dell'Aera: TCP Westwood+ support
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>
/* TCP Westwood structure */
struct westwood {
        u32    bw_ns_est;       /* first bandwidth estimation..not too smoothed 8) */
        u32    bw_est;          /* bandwidth estimate */
        u32    rtt_win_sx;      /* here starts a new evaluation... */
        u32    bk;              /* bytes acked during the current window */
        u32    snd_una;         /* used for evaluating the number of acked bytes */
        u32    cumul_ack;       /* bytes acked by the current ACK */
        u32    accounted;       /* bytes already credited on duplicate ACKs */
        u32    rtt;             /* last RTT sample (jiffies) */
        u32    rtt_min;         /* minimum observed RTT */
};
/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)  /* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)  /* maybe too conservative?! */
/*
 * @tcp_westwood_create
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about the RTTmin at
 * this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value
 * was deliberately chosen to be overly conservative: that way we are
 * sure it will be updated in a consistent way as soon as possible,
 * which will reasonably happen within the first RTT of the
 * connection's lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = 0;
        w->bw_ns_est = 0;
        w->bw_est = 0;
        w->accounted = 0;
        w->cumul_ack = 0;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
}
/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return (((7 * a) + b) >> 3);
}
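
/*
 * The filter above is an exponentially weighted moving average with
 * gain 1/8: new = (7*old + sample)/8, i.e. new = old + (sample - old)/8.
 * A worked example (values are illustrative, not from the source): with
 * old = 80 and sample = 160, (7*80 + 160) >> 3 = 720/8 = 90, so each
 * sample moves the average one eighth of the way toward itself.
 */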
static inline void westwood_filter(struct westwood *w, u32 delta)
{
        w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
        w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
}
/*
 * @westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
        struct westwood *w = inet_csk_ca(sk);

        if (cnt > 0)
                w->rtt = tcp_sk(sk)->srtt >> 3; /* srtt is stored scaled by 8 */
}
/*
 * @westwood_update_window
 * Updates the RTT evaluation window if the right moment has come, and
 * if so calls the filter to compute a new bandwidth estimate.
 */
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_time_stamp - w->rtt_win_sx;

        /*
         * See if an RTT-long window has passed. Be careful: if the RTT
         * is less than 50ms we don't filter but keep 'building the
         * sample'. This lower bound was chosen because estimates over
         * very small time intervals are unreliable and better avoided.
         * Obviously on a LAN we will reasonably always have
         * right_bound = left_bound + WESTWOOD_RTT_MIN.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_time_stamp;
        }
}
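
/*
 * A worked example of one bandwidth sample (numbers are illustrative
 * and assume HZ = 1000, i.e. one jiffy per millisecond): if bk = 30000
 * bytes were acked over a delta of 100 jiffies (100 ms), the raw sample
 * fed to the filter is bk/delta = 300 bytes/jiffy, which corresponds to
 * 300 * 1000 * 8 = 2.4 Mbit/s.
 */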
/*
 * @westwood_fast_bw
 * Called when we are in the fast path, in particular when header
 * prediction succeeds. In that case the update is straightforward and
 * needs no particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        w->rtt_min = min(w->rtt, w->rtt_min);
}
/*
 * @westwood_acked_count
 * Evaluates cumul_ack, the number of bytes to credit to bk, correcting
 * for delayed, duplicate and partial ACKs.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* If cumul_ack is 0 this is a dupack since it's not moving
         * tp->snd_una.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }

        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}
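
/*
 * A worked example of the dupack accounting above (numbers are
 * illustrative, not from the source). Assume mss_cache = 1000. Three
 * dupacks arrive: each credits one MSS immediately (cumul_ack = 1000)
 * and records it in accounted, which grows to 3000. The retransmission
 * then elicits a cumulative ACK advancing snd_una by 5000: cumul_ack =
 * 5000, accounted (3000) < cumul_ack, so cumul_ack becomes 2000 and
 * accounted resets to 0. Total credited: 3*1000 + 2000 = 5000 bytes,
 * exactly the data actually acked, with nothing counted twice.
 */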
static inline u32 westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
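
/*
 * Units sketch for the computation above (example numbers are
 * illustrative): bw_est is in bytes per jiffy and rtt_min in jiffies,
 * so bw_est * rtt_min is the estimated bandwidth-delay product in
 * bytes; dividing by mss_cache converts it to packets. With bw_est =
 * 300 bytes/jiffy, rtt_min = 100 jiffies and an MSS of 1500 bytes:
 * (300 * 100) / 1500 = 20 packets.
 */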
/*
 * TCP Westwood
 * Here the limit is evaluated as Bw estimation * RTTmin (converted to
 * packets using mss_cache). The limit is clamped to >= 2, so this
 * never returns 0.
 */
static u32 tcp_westwood_cwnd_min(struct sock *sk)
{
        return westwood_bw_rttmin(sk);
}
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_FAST_ACK:
                westwood_fast_bw(sk);
                break;

        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
                break;

        case CA_EVENT_FRTO:
                tp->snd_ssthresh = westwood_bw_rttmin(sk);
                break;

        case CA_EVENT_SLOW_ACK:
                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);
                w->rtt_min = min(w->rtt, w->rtt_min);
                break;

        default:
                /* don't care */
                break;
        }
}
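
/*
 * Note on the event handler above: this is where Westwood+ departs from
 * Reno. After congestion (CA_EVENT_COMPLETE_CWR, CA_EVENT_FRTO) the
 * window is reset to the estimated bandwidth-delay product computed by
 * westwood_bw_rttmin() rather than simply halved, which is what makes
 * the recovery adaptive to the actually available bandwidth.
 */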
/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
                              struct sk_buff *skb)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                struct rtattr *rta;
                struct tcpvegas_info *info;

                /* __RTA_PUT jumps to rtattr_failure if the skb is full */
                rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
                info = RTA_DATA(rta);
                info->tcpv_enabled = 1;
                info->tcpv_rttcnt = 0;
                info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
                info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
        rtattr_failure: ;
        }
}
static struct tcp_congestion_ops tcp_westwood = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_westwood_cwnd_min,
        .cwnd_event     = tcp_westwood_event,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};
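
/*
 * Usage sketch (userspace, not part of this module): once the module is
 * loaded, Westwood+ can be selected per socket with
 *
 *         setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *                    "westwood", strlen("westwood"));
 *
 * or made the system-wide default via
 *
 *         echo westwood > /proc/sys/net/ipv4/tcp_congestion_control
 */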
static int __init tcp_westwood_register(void)
{
        BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");