@@ -1048,6 +1048,7 @@ struct tcp_sacktag_state {
 	int reord;
 	int fack_count;
 	int flag;
+	s32 rtt; /* RTT measured by SACKing never-retransmitted data */
 };
 
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1108,7 +1109,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
 			  struct tcp_sacktag_state *state, u8 sacked,
 			  u32 start_seq, u32 end_seq,
-			  int dup_sack, int pcount)
+			  int dup_sack, int pcount, u32 xmit_time)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fack_count = state->fack_count;
@@ -1148,6 +1149,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
 					   state->reord);
 				if (!after(end_seq, tp->high_seq))
 					state->flag |= FLAG_ORIG_SACK_ACKED;
+				/* Pick the earliest sequence sacked for RTT */
+				if (state->rtt < 0)
+					state->rtt = tcp_time_stamp - xmit_time;
 			}
 
 			if (sacked & TCPCB_LOST) {
@@ -1205,7 +1209,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	 * tcp_highest_sack_seq() when skb is highest_sack.
 	 */
 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
-			start_seq, end_seq, dup_sack, pcount);
+			start_seq, end_seq, dup_sack, pcount,
+			TCP_SKB_CB(skb)->when);
 
 	if (skb == tp->lost_skb_hint)
 		tp->lost_cnt_hint += pcount;
@@ -1479,7 +1484,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						TCP_SKB_CB(skb)->seq,
 						TCP_SKB_CB(skb)->end_seq,
 						dup_sack,
-						tcp_skb_pcount(skb));
+						tcp_skb_pcount(skb),
+						TCP_SKB_CB(skb)->when);
 
 			if (!before(TCP_SKB_CB(skb)->seq,
 				    tcp_highest_sack_seq(tp)))
@@ -1536,7 +1542,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-			u32 prior_snd_una)
+			u32 prior_snd_una, s32 *sack_rtt)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1554,6 +1560,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
 	state.flag = 0;
 	state.reord = tp->packets_out;
+	state.rtt = -1;
 
 	if (!tp->sacked_out) {
 		if (WARN_ON(tp->fackets_out))
@@ -1737,6 +1744,7 @@ out:
 	WARN_ON((int)tp->retrans_out < 0);
 	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
+	*sack_rtt = state.rtt;
 	return state.flag;
 }
 
@@ -3254,6 +3262,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_packets = tp->packets_out;
 	const int prior_unsacked = tp->packets_out - tp->sacked_out;
 	int acked = 0; /* Number of packets newly acked */
+	s32 sack_rtt = -1;
 
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
@@ -3310,7 +3319,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
 	if (TCP_SKB_CB(skb)->sacked)
-		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+						&sack_rtt);
 
 	if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
 		flag |= FLAG_ECE;
@@ -3382,7 +3392,8 @@ old_ack:
 	 * If data was DSACKed, see if we can undo a cwnd reduction.
 	 */
 	if (TCP_SKB_CB(skb)->sacked) {
-		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+						&sack_rtt);
 		tcp_fastretrans_alert(sk, acked, prior_unsacked,
 				      is_dupack, flag);
 	}
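
A note on the sampling rule this patch introduces (illustration only, not part of the patch): tcp_sacktag_one() now takes at most one RTT sample per incoming ACK, from the earliest-sequence segment that is newly SACKed and was never retransmitted, since a retransmitted segment's send timestamp would make the measurement ambiguous; sack_rtt stays -1 when no such segment exists. The sketch below is a minimal userspace model of that rule; struct seg, sack_rtt_sample(), and the plain integer timestamps are illustrative assumptions, not kernel interfaces.

/* Standalone userspace sketch of the "earliest never-retransmitted SACKed
 * segment" sampling rule. Compile with: cc -o sack_rtt sack_rtt.c
 */
#include <stdio.h>

struct seg {
	unsigned int seq;	/* starting sequence number */
	unsigned int xmit_time;	/* timestamp taken when the segment was sent */
	int retransmitted;	/* non-zero if the segment was ever retransmitted */
	int newly_sacked;	/* non-zero if this ACK's SACK blocks cover it */
};

/* Walk segments in sequence order; the "rtt < 0" guard mirrors the patch's
 * "if (state->rtt < 0)" so only the earliest eligible segment contributes.
 * Returns -1 when no valid sample exists, like state.rtt / *sack_rtt.
 */
static int sack_rtt_sample(const struct seg *segs, int n, unsigned int now)
{
	int rtt = -1;
	int i;

	for (i = 0; i < n; i++) {
		if (!segs[i].newly_sacked || segs[i].retransmitted)
			continue;
		if (rtt < 0)
			rtt = (int)(now - segs[i].xmit_time);
	}
	return rtt;
}

int main(void)
{
	struct seg segs[] = {
		{ .seq = 1000, .xmit_time = 100, .retransmitted = 1, .newly_sacked = 1 },
		{ .seq = 2000, .xmit_time = 110, .retransmitted = 0, .newly_sacked = 1 },
		{ .seq = 3000, .xmit_time = 130, .retransmitted = 0, .newly_sacked = 1 },
	};

	/* Segment 1000 is skipped (retransmitted); segment 2000 supplies the
	 * sample, so this prints "sack_rtt = 40" for now = 150.
	 */
	printf("sack_rtt = %d\n", sack_rtt_sample(segs, 3, 150));
	return 0;
}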