@@ -4080,8 +4080,10 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);
 
-	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+	if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
+	    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
 		int mib_idx;
 
 		if (before(seq, tp->rcv_nxt))
@@ -4110,13 +4112,15 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
-		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+		if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
+		    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
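
For context: both hunks fetch the socket's cached route with __sk_dst_get() and then gate DSACK generation on a per-route feature bit, so duplicate-SACK blocks can be suppressed toward specific destinations without touching the global sysctl_tcp_dsack knob. A minimal sketch of the shape of the dst_feature() helper these call sites rely on, assuming it masks the route's RTAX_FEATURES metric; the exact body in include/net/dst.h may differ, and the NULL guard is an assumption here (the call sites pass whatever __sk_dst_get() returns):

static inline u32 dst_feature(const struct dst_entry *dst, u32 feature)
{
	/* RTAX_FEATURES is a per-route metric holding a bitmask of
	 * feature flags; mask out the bit the caller asked about.
	 * Returning 0 for a NULL dst (assumption, not verbatim source)
	 * keeps the default behavior when no route is cached yet.
	 */
	return dst ? (dst_metric(dst, RTAX_FEATURES) & feature) : 0;
}

With this helper, a route configured with the NO_DSACK feature bit makes !dst_feature(dst, RTAX_FEATURE_NO_DSACK) evaluate false, and tcp_dsack_set()/tcp_send_dupack() fall through without emitting a DSACK block.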