@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 
 /* This function calculates a "timeout" which is equivalent to the timeout of a
  * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
  */
 static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
-				  unsigned int timeout)
+				  unsigned int timeout,
+				  bool syn_set)
 {
	unsigned int linear_backoff_thresh, start_ts;
+	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
	if (!inet_csk(sk)->icsk_retransmits)
		return false;
@@ -152,12 +155,12 @@ static bool retransmits_timed_out(struct sock *sk,
		start_ts = tcp_sk(sk)->retrans_stamp;
 
	if (likely(timeout == 0)) {
-		linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
		if (boundary <= linear_backoff_thresh)
-			timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+			timeout = ((2 << boundary) - 1) * rto_base;
		else
-			timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
@@ -168,14 +171,15 @@ static int tcp_write_timeout(struct sock *sk)
 {
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
-	bool do_reset;
+	bool do_reset, syn_set = 0;
 
	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+		syn_set = 1;
	} else {
-		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);
 
@@ -188,7 +192,7 @@ static int tcp_write_timeout(struct sock *sk)
 
		retry_until = tcp_orphan_retries(sk, alive);
		do_reset = alive ||
-			   !retransmits_timed_out(sk, retry_until, 0);
+			   !retransmits_timed_out(sk, retry_until, 0, 0);
 
		if (tcp_out_of_resources(sk, do_reset))
			return 1;
@@ -196,8 +200,7 @@ static int tcp_write_timeout(struct sock *sk)
	}
 
	if (retransmits_timed_out(sk, retry_until,
-				  (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 :
-				  icsk->icsk_user_timeout)) {
+				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
@@ -439,7 +442,7 @@ out_reset_timer:
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);
 
 out:;
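
For reference, below is a minimal user-space sketch of the timeout arithmetic this patch changes: it reproduces the ((2 << boundary) - 1) * rto_base formula with rto_base switched between TCP_RTO_MIN and TCP_TIMEOUT_INIT, the way the new syn_set flag does. The HZ, TCP_RTO_MIN, TCP_RTO_MAX and TCP_TIMEOUT_INIT values are assumed kernel defaults of this era (HZ=1000, 200 ms, 120 s, 3 s), and the ilog2()/backoff_timeout() helpers here are stand-ins for illustration, not code from this diff.

/*
 * Standalone sketch of the timeout formula used by the patched
 * retransmits_timed_out().  Constants below are assumed kernel
 * defaults, not taken from this diff.
 */
#include <stdbool.h>
#include <stdio.h>

#define HZ			1000U
#define TCP_RTO_MIN		(HZ / 5)	/* 200 ms */
#define TCP_RTO_MAX		(120U * HZ)	/* 120 s */
#define TCP_TIMEOUT_INIT	(3U * HZ)	/* 3 s initial RTO for SYNs */

/* Integer log2, standing in for the kernel's ilog2(). */
static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Same arithmetic as the patched code: total time covered by "boundary"
 * exponentially backed-off retransmissions, starting from rto_base and
 * capped at TCP_RTO_MAX per attempt. */
static unsigned int backoff_timeout(unsigned int boundary, bool syn_set)
{
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
	unsigned int linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);

	if (boundary <= linear_backoff_thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << linear_backoff_thresh) - 1) * rto_base +
	       (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}

int main(void)
{
	unsigned int boundary;

	for (boundary = 1; boundary <= 6; boundary++)
		printf("boundary=%u  data: %6u ms   syn: %6u ms\n",
		       boundary,
		       backoff_timeout(boundary, false),
		       backoff_timeout(boundary, true));
	return 0;
}

With the assumed values, six backed-off retransmissions amount to roughly 25 seconds when computed from TCP_RTO_MIN but several minutes when computed from the 3 s SYN RTO, which is the mismatch for SYN_SENT/SYN_RECV sockets that the syn_set flag is meant to account for.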