|
@@ -29,6 +29,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
|
|
|
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
|
|
|
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
|
|
|
int sysctl_tcp_orphan_retries __read_mostly;
|
|
|
+int sysctl_tcp_thin_linear_timeouts __read_mostly;
|
|
|
|
|
|
static void tcp_write_timer(unsigned long);
|
|
|
static void tcp_delack_timer(unsigned long);
|
|
@@ -415,7 +416,25 @@ void tcp_retransmit_timer(struct sock *sk)
|
|
|
icsk->icsk_retransmits++;
|
|
|
|
|
|
out_reset_timer:
|
|
|
- icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
|
|
|
+ /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
|
|
|
+ * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
|
|
|
+ * might be increased if the stream oscillates between thin and thick,
|
|
|
+ * thus the old value might already be too high compared to the value
|
|
|
+ * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
|
|
|
+ * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
|
|
|
+	 * exponential backoff behaviour to avoid continuing to hammer
|
|
|
+	 * linear-timeout retransmissions into a black hole.
|
|
|
+ */
|
|
|
+ if (sk->sk_state == TCP_ESTABLISHED &&
|
|
|
+ (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
|
|
|
+ tcp_stream_is_thin(tp) &&
|
|
|
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
|
|
|
+ icsk->icsk_backoff = 0;
|
|
|
+ icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
|
|
|
+ } else {
|
|
|
+ /* Use normal (exponential) backoff */
|
|
|
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
|
|
|
+ }
|
|
|
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
|
|
|
if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
|
|
|
__sk_dst_reset(sk);
|