@@ -2830,9 +2830,13 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        /* Do not moderate cwnd if it's already undone in cwr or recovery */
-        if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
-                tp->snd_cwnd = tp->snd_ssthresh;
+
+        /* Do not moderate cwnd if it's already undone in cwr or recovery. */
+        if (tp->undo_marker) {
+                if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+                        tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+                else /* PRR */
+                        tp->snd_cwnd = tp->snd_ssthresh;
                 tp->snd_cwnd_stamp = tcp_time_stamp;
         }
         tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
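
The undo check above used to moderate cwnd whenever it exceeded ssthresh. It now branches on the congestion-avoidance state: in TCP_CA_CWR the clamp is min(), so a cwnd that the reduction already brought below ssthresh is left alone, while in recovery PRR converges cwnd toward ssthresh, so assigning ssthresh outright completes the reduction. A minimal userspace sketch of that decision, with plain integers standing in for the tcp_sock fields (the helper name is hypothetical):

        #include <stdio.h>

        /* Hypothetical stand-in for the reworked tcp_complete_cwr() logic. */
        static unsigned int complete_cwr(unsigned int cwnd, unsigned int ssthresh,
                                         int in_cwr)
        {
                if (in_cwr)
                        return cwnd < ssthresh ? cwnd : ssthresh; /* min(): never raise */
                return ssthresh; /* PRR recovery ends with cwnd == ssthresh */
        }

        int main(void)
        {
                printf("CWR: %u\n", complete_cwr(8, 10, 1)); /* stays at 8 */
                printf("PRR: %u\n", complete_cwr(8, 10, 0)); /* becomes 10 */
                return 0;
        }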
@@ -2950,6 +2954,38 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
+/* This function implements the PRR algorithm, specifically the PRR-SSRB
+ * (proportional rate reduction with slow start reduction bound) as described in
+ * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ *   1) If the number of packets in flight is larger than ssthresh, PRR
+ *      spreads the cwnd reductions across a full RTT.
+ *   2) If the number of packets in flight is lower than ssthresh (such as
+ *      due to excess losses and/or application stalls), do not perform any
+ *      further cwnd reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
+                                        int fast_rexmit, int flag)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        int sndcnt = 0;
+        int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+        if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+                u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+                               tp->prior_cwnd - 1;
+                sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+        } else {
+                sndcnt = min_t(int, delta,
+                               max_t(int, tp->prr_delivered - tp->prr_out,
+                                     newly_acked_sacked) + 1);
+        }
+
+        sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
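
In the proportional branch above, sndcnt works out to ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out: adding prior_cwnd - 1 before div_u64() is the usual round-up idiom, and subtracting prr_out turns the cumulative sending budget into the allowance for this ACK. Once all prior_cwnd packets outstanding at recovery entry have been delivered, the sender has emitted exactly ssthresh new packets, i.e. the reduction is spread across one RTT. A self-contained userspace sketch of just that branch (assuming pipe stays above ssthresh, one packet delivered per ACK, and ignoring the fast_rexmit floor):

        #include <stdio.h>

        /* Hypothetical userspace mirror of the proportional branch of
         * tcp_update_cwnd_in_recovery(); div_u64() becomes plain 64-bit
         * division here. */
        static int prr_sndcnt(unsigned int ssthresh, unsigned int prior_cwnd,
                              unsigned int prr_delivered, unsigned int prr_out)
        {
                unsigned long long dividend =
                        (unsigned long long)ssthresh * prr_delivered +
                        prior_cwnd - 1;

                return (int)(dividend / prior_cwnd) - (int)prr_out;
        }

        int main(void)
        {
                unsigned int prior_cwnd = 10;
                unsigned int ssthresh = 7;      /* e.g. a CUBIC-style 0.7 cut */
                unsigned int delivered = 0, out = 0;

                for (int ack = 1; ack <= (int)prior_cwnd; ack++) {
                        int sndcnt;

                        delivered++;            /* one packet ACKed/SACKed */
                        sndcnt = prr_sndcnt(ssthresh, prior_cwnd, delivered, out);
                        if (sndcnt > 0)
                                out += sndcnt;
                        printf("ack %2d: sndcnt=%d total_out=%u\n",
                               ack, sndcnt, out);
                }
                /* total_out ends at 7 == ssthresh: cwnd is walked down from
                 * 10 to 7 smoothly over the round trip. */
                return 0;
        }

The slow-start branch takes over when pipe falls below ssthresh: delta bounds sndcnt so cwnd grows back toward ssthresh instead of shrinking further.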
@@ -2961,7 +2997,8 @@ EXPORT_SYMBOL(tcp_simple_retransmit);
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+                                  int newly_acked_sacked, int flag)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
@@ -3111,13 +3148,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 
                 tp->bytes_acked = 0;
                 tp->snd_cwnd_cnt = 0;
+                tp->prior_cwnd = tp->snd_cwnd;
+                tp->prr_delivered = 0;
+                tp->prr_out = 0;
                 tcp_set_ca_state(sk, TCP_CA_Recovery);
                 fast_rexmit = 1;
         }
 
         if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
                 tcp_update_scoreboard(sk, fast_rexmit);
-        tcp_cwnd_down(sk, flag);
+        tp->prr_delivered += newly_acked_sacked;
+        tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
         tcp_xmit_retransmit_queue(sk);
 }
 
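
On entry to TCP_CA_Recovery the patch snapshots cwnd into prior_cwnd (the divisor used above) and zeroes both PRR counters; every later ACK in recovery then folds its newly delivered packets into prr_delivered before the cwnd update runs. This replaces tcp_cwnd_down()'s rate-halving, which shrank cwnd by roughly one segment for every other ACK regardless of how much data the ACKs actually covered. prr_out is presumably advanced on the transmit side, which these hunks do not show. A small sketch of the bookkeeping, with a hypothetical plain-C struct in place of tcp_sock:

        #include <stdio.h>

        /* Hypothetical mirror of the three fields the patch adds. */
        struct prr_state {
                unsigned int prior_cwnd;        /* cwnd snapshot at recovery entry */
                unsigned int prr_delivered;     /* delivered since entry */
                unsigned int prr_out;           /* sent since entry */
        };

        int main(void)
        {
                struct prr_state s;

                /* Recovery entry, mirroring the hunk above. */
                s.prior_cwnd = 20;
                s.prr_delivered = 0;
                s.prr_out = 0;

                /* Two ACKs arrive, newly delivering 3 and then 2 packets;
                 * the cwnd update would run after each increment. */
                s.prr_delivered += 3;
                s.prr_delivered += 2;
                printf("delivered %u of the %u outstanding at entry\n",
                       s.prr_delivered, s.prior_cwnd);
                return 0;
        }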
@@ -3632,6 +3673,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         u32 prior_in_flight;
         u32 prior_fackets;
         int prior_packets;
+        int prior_sacked = tp->sacked_out;
+        int newly_acked_sacked = 0;
         int frto_cwnd = 0;
 
         /* If the ack is older than previous acks
@@ -3703,6 +3746,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         /* See if we can take anything off of the retransmit queue. */
         flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+        newly_acked_sacked = (prior_packets - prior_sacked) -
+                             (tp->packets_out - tp->sacked_out);
+
         if (tp->frto_counter)
                 frto_cwnd = tcp_process_frto(sk, flag);
         /* Guarantee sacktag reordering detection against wrap-arounds */
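
newly_acked_sacked counts the packets this ACK newly delivered, whether removed from the retransmit queue by the cumulative ACK or newly covered by SACK blocks: it is the drop, across tcp_clean_rtx_queue(), in the number of packets that are outstanding but not yet SACKed. A worked example with made-up numbers:

        #include <stdio.h>

        int main(void)
        {
                /* Before tcp_clean_rtx_queue(): 10 outstanding, 2 SACKed. */
                int prior_packets = 10, prior_sacked = 2;
                /* After it: 3 packets cumulatively ACKed, 1 newly SACKed. */
                int packets_out = 7, sacked_out = 3;

                int newly_acked_sacked = (prior_packets - prior_sacked) -
                                         (packets_out - sacked_out);

                printf("newly_acked_sacked = %d\n", newly_acked_sacked); /* 4 */
                return 0;
        }

This is the quantity fed into prr_delivered, so PRR's sending allowance tracks actual forward progress rather than raw ACK arrivals.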
@@ -3715,7 +3761,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                     tcp_may_raise_cwnd(sk, flag))
                         tcp_cong_avoid(sk, ack, prior_in_flight);
                 tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-                                      flag);
+                                      newly_acked_sacked, flag);
         } else {
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                         tcp_cong_avoid(sk, ack, prior_in_flight);