@@ -2659,7 +2659,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const int undo)
+static void tcp_undo_cwr(struct sock *sk, const int undo_ssthresh)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2671,14 +2671,13 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
 		else
 			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
-		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
+		if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
 			TCP_ECN_withdraw_cwr(tp);
 		}
 	} else {
 		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
 	}
-	tcp_moderate_cwnd(tp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2822,8 +2821,11 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	/* Do not moderate cwnd if it's already undone in cwr or recovery */
+	if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
+		tp->snd_cwnd = tp->snd_ssthresh;
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 