@@ -1252,6 +1252,10 @@ int tcp_use_frto(const struct sock *sk)
 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
  * recovery a bit and use heuristics in tcp_process_frto() to detect if
  * the RTO was spurious.
+ *
+ * Do like tcp_enter_loss() would; when RTO expires the second time it
+ * does:
+ * "Reduce ssthresh if it has not yet been made inside this window."
  */
 void tcp_enter_frto(struct sock *sk)
 {
@@ -1259,11 +1263,10 @@ void tcp_enter_frto(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
-	tp->frto_counter = 1;
-
-	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
+	if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
 	    tp->snd_una == tp->high_seq ||
-	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+	    ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
+	     !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_FRTO);
@@ -1285,6 +1288,7 @@ void tcp_enter_frto(struct sock *sk)
 
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tp->frto_highmark = tp->snd_nxt;
+	tp->frto_counter = 1;
 }
 
 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
@@ -2513,12 +2517,16 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
  * to prove that the RTO is indeed spurious. It transfers the control
  * from F-RTO to the conventional RTO recovery
  */
-static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
+static void tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tcp_sync_left_out(tp);
 
+	/* Duplicate the behavior from Loss state (fastretrans_alert) */
+	if (flag&FLAG_DATA_ACKED)
+		inet_csk(sk)->icsk_retransmits = 0;
+
 	if (tp->snd_una == prior_snd_una ||
 	    !before(tp->snd_una, tp->frto_highmark)) {
 		tcp_enter_frto_loss(sk);
@@ -2607,7 +2615,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
 	if (tp->frto_counter)
-		tcp_process_frto(sk, prior_snd_una);
+		tcp_process_frto(sk, prior_snd_una, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advance CWND, if state allows this. */