@@ -1417,11 +1417,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 				if ((dup_sack && in_sack) &&
 				    (sacked&TCPCB_SACKED_ACKED))
 					reord = min(fack_count, reord);
-			} else {
-				/* If it was in a hole, we detected reordering. */
-				if (fack_count < prior_fackets &&
-				    !(sacked&TCPCB_SACKED_ACKED))
-					reord = min(fack_count, reord);
 			}
 
 			/* Nothing to do; acked frame is about to be dropped. */
@@ -2634,7 +2629,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * is before the ack sequence we can discard it as it's confirmed to have
  * arrived at the other end.
  */
-static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
+static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
+			       int prior_fackets)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2643,6 +2639,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 	int fully_acked = 1;
 	int flag = 0;
 	int prior_packets = tp->packets_out;
+	u32 cnt = 0;
+	u32 reord = tp->packets_out;
 	s32 seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
 
@@ -2683,10 +2681,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 				if ((flag & FLAG_DATA_ACKED) ||
 				    (packets_acked > 1))
 					flag |= FLAG_NONHEAD_RETRANS_ACKED;
-			} else if (seq_rtt < 0) {
-				seq_rtt = now - scb->when;
-				if (fully_acked)
-					last_ackt = skb->tstamp;
+			} else {
+				if (seq_rtt < 0) {
+					seq_rtt = now - scb->when;
+					if (fully_acked)
+						last_ackt = skb->tstamp;
+				}
+				if (!(sacked & TCPCB_SACKED_ACKED))
+					reord = min(cnt, reord);
 			}
 
 			if (sacked & TCPCB_SACKED_ACKED)
@@ -2697,12 +2699,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 			if ((sacked & TCPCB_URG) && tp->urg_mode &&
 			    !before(end_seq, tp->snd_up))
 				tp->urg_mode = 0;
-		} else if (seq_rtt < 0) {
-			seq_rtt = now - scb->when;
-			if (fully_acked)
-				last_ackt = skb->tstamp;
+		} else {
+			if (seq_rtt < 0) {
+				seq_rtt = now - scb->when;
+				if (fully_acked)
+					last_ackt = skb->tstamp;
+			}
+			reord = min(cnt, reord);
 		}
 		tp->packets_out -= packets_acked;
+		cnt += packets_acked;
 
 		/* Initial outgoing SYN's get put onto the write_queue
 		 * just like anything else we transmit. It is not
@@ -2734,13 +2740,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, flag, seq_rtt);
 		tcp_rearm_rto(sk);
 
+		if (tcp_is_reno(tp)) {
+			tcp_remove_reno_sacks(sk, pkts_acked);
+		} else {
+			/* Non-retransmitted hole got filled? That's reordering */
+			if (reord < prior_fackets)
+				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+		}
+
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 		/* hint's skb might be NULL but we don't need to care */
 		tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
 					       tp->fastpath_cnt_hint);
-		if (tcp_is_reno(tp))
-			tcp_remove_reno_sacks(sk, pkts_acked);
-
 		if (ca_ops->pkts_acked) {
 			s32 rtt_us = -1;
 
@@ -3023,6 +3034,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	u32 prior_in_flight;
+	u32 prior_fackets;
 	s32 seq_rtt;
 	int prior_packets;
 	int frto_cwnd = 0;
@@ -3047,6 +3059,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
 	}
 
+	prior_fackets = tp->fackets_out;
+
 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.
 		 * No more checks are required.
@@ -3088,7 +3102,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	prior_in_flight = tcp_packets_in_flight(tp);
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
+	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
 	/* Guarantee sacktag reordering detection against wrap-arounds */
 	if (before(tp->frto_highmark, tp->snd_una))
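
The hunks above move reordering detection from tcp_sacktag_write_queue() into tcp_clean_rtx_queue(): while walking the skbs covered by a cumulative ACK, the lowest queue position that is acked without ever having been SACKed or retransmitted is remembered in reord, and if that position lies below prior_fackets a hole was filled by the original transmission, i.e. the path reordered packets, so tcp_update_reordering() is fed tp->fackets_out - reord. Below is a minimal standalone sketch of that walk, not part of the patch; the segment array, the SEG_* bits, min_u32() and detect_reordering() are stand-ins invented for the example (not kernel identifiers), and cnt advances per segment here rather than by packets_acked per skb.

/*
 * Standalone sketch of the reordering check added to tcp_clean_rtx_queue().
 * A byte per segment stands in for the skb queue and its TCPCB_* bits.
 */
#include <stdio.h>

#define SEG_SACKED	0x1	/* stand-in for TCPCB_SACKED_ACKED */
#define SEG_RETRANS	0x2	/* stand-in for TCPCB_RETRANS */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/*
 * Walk the segments newly covered by a cumulative ACK.  'reord' ends up
 * as the lowest queue position acked without ever being SACKed or
 * retransmitted; if that lies below what was already known to be FACKed,
 * the hole was filled by the original transmission and the path
 * reordered packets.  Returns the reordering metric, 0 if none detected.
 */
static unsigned int detect_reordering(const unsigned char *seg, unsigned int acked,
				      unsigned int packets_out,
				      unsigned int prior_fackets,
				      unsigned int fackets_out)
{
	unsigned int cnt = 0;
	unsigned int reord = packets_out;
	unsigned int i;

	for (i = 0; i < acked; i++) {
		if (seg[i] & SEG_RETRANS) {
			/* Retransmitted: a late cumulative ACK proves nothing. */
		} else if (!(seg[i] & SEG_SACKED)) {
			/* Never SACKed yet cumulatively acked: hole got filled. */
			reord = min_u32(cnt, reord);
		}
		cnt++;	/* the patch advances cnt by packets_acked per skb */
	}

	if (reord < prior_fackets)
		return fackets_out - reord;
	return 0;
}

int main(void)
{
	/*
	 * Segments 1-3 were SACKed while segment 0 was still missing; the
	 * ACK that finally covers all four proves segment 0 arrived late
	 * without a retransmission, so the network reordered it.
	 */
	unsigned char seg[4] = { 0, SEG_SACKED, SEG_SACKED, SEG_SACKED };
	unsigned int metric = detect_reordering(seg, 4, 4, 4, 4);

	printf("reordering metric: %u\n", metric);	/* prints 4 */
	return 0;
}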