|
@@ -32,6 +32,8 @@ enum tcp_metric_index {
 
 struct tcp_fastopen_metrics {
 	u16	mss;
+	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
+	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
 	struct	tcp_fastopen_cookie	cookie;
 };
 
@@ -125,6 +127,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
 	tm->tcpm_ts = 0;
 	tm->tcpm_ts_stamp = 0;
 	tm->tcpm_fastopen.mss = 0;
+	tm->tcpm_fastopen.syn_loss = 0;
 	tm->tcpm_fastopen.cookie.len = 0;
 }
 
@@ -644,7 +647,8 @@ bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 
 static DEFINE_SEQLOCK(fastopen_seqlock);
 
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie)
+			    struct tcp_fastopen_cookie *cookie,
+			    int *syn_loss, unsigned long *last_syn_loss)
 {
 	struct tcp_metrics_block *tm;
@@ -659,14 +663,15 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 		if (tfom->mss)
 			*mss = tfom->mss;
 		*cookie = tfom->cookie;
+		*syn_loss = tfom->syn_loss;
+		*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
 	} while (read_seqretry(&fastopen_seqlock, seq));
 	}
 	rcu_read_unlock();
 }
 
-
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-			    struct tcp_fastopen_cookie *cookie)
+			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
 {
 	struct tcp_metrics_block *tm;
 
@@ -679,6 +684,11 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 		tfom->mss = mss;
 		if (cookie->len > 0)
 			tfom->cookie = *cookie;
+		if (syn_lost) {
+			++tfom->syn_loss;
+			tfom->last_syn_loss = jiffies;
+		} else
+			tfom->syn_loss = 0;
 		write_sequnlock_bh(&fastopen_seqlock);
 	}
 	rcu_read_unlock();