@@ -235,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
-			     const struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	/* Optimize this! */
 	int truesize = tcp_win_from_space(skb->truesize)/2;
 	int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
@@ -252,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
 	return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+static void tcp_grow_window(struct sock *sk,
 			    struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
@@ -267,7 +269,7 @@ static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
 		if (tcp_win_from_space(skb->truesize) <= skb->len)
 			incr = 2*tp->advmss;
 		else
-			incr = __tcp_grow_window(sk, tp, skb);
+			incr = __tcp_grow_window(sk, skb);
 
 		if (incr) {
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
@@ -330,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk)
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
-static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
+static void tcp_clamp_window(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	icsk->icsk_ack.quick = 0;
@@ -503,8 +506,9 @@ new_measure:
  * each ACK we send, he increments snd_cwnd and transmits more of his
  * queue. -DaveM
  */
-static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now;
 
@@ -545,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
 	TCP_ECN_check_ce(tp, skb);
 
 	if (skb->len >= 128)
-		tcp_grow_window(sk, tp, skb);
+		tcp_grow_window(sk, skb);
 }
 
 /* Called to compute a smoothed rtt estimate. The data fed to this
@@ -1541,8 +1545,10 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
-static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
+static inline int tcp_head_timedout(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	return tp->packets_out &&
 	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
@@ -1640,8 +1646,9 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
+static int tcp_time_to_recover(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
 	/* Do not perform any recovery during FRTO algorithm */
@@ -1659,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
-	if (tcp_head_timedout(sk, tp))
+	if (tcp_head_timedout(sk))
 		return 1;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
@@ -1668,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
 	packets_out = tp->packets_out;
 	if (packets_out <= tp->reordering &&
 	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
-	    !tcp_may_send_now(sk, tp)) {
+	    !tcp_may_send_now(sk)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
 		 */
@@ -1708,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (acked > 0) {
 		/* One ACK acked hole. The rest eat duplicate ACKs. */
 		if (acked-1 >= tp->sacked_out)
@@ -1728,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 }
 
 /* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
+static void tcp_mark_head_lost(struct sock *sk,
 			       int packets, u32 high_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int cnt;
 
@@ -1771,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
+static void tcp_update_scoreboard(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (IsFack(tp)) {
 		int lost = tp->fackets_out - tp->reordering;
 		if (lost <= 0)
 			lost = 1;
-		tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
+		tcp_mark_head_lost(sk, lost, tp->high_seq);
 	} else {
-		tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
+		tcp_mark_head_lost(sk, 1, tp->high_seq);
 	}
 
 	/* New heuristics: it is possible only after we switched
@@ -1787,7 +1799,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 */
-	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk)) {
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1867,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 /* Undo procedures. */
 
 #if FASTRETRANS_DEBUG > 1
-static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
+static void DBGUNDO(struct sock *sk, const char *msg)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
+
 	printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
 	       msg,
 	       NIPQUAD(inet->daddr), ntohs(inet->dport),
@@ -1915,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_recovery(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_may_undo(tp)) {
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
-		DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
@@ -1941,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
+static void tcp_try_undo_dsack(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->undo_marker && !tp->undo_retrans) {
-		DBGUNDO(sk, tp, "D-SACK");
+		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
@@ -1953,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
 
 /* Undo during fast recovery after partial ACK. */
 
-static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
-				int acked)
+static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	/* Partial ACK arrived. Force Hoe's retransmit. */
 	int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
 
@@ -1968,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
-		DBGUNDO(sk, tp, "Hoe");
+		DBGUNDO(sk, "Hoe");
 		tcp_undo_cwr(sk, 0);
 		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
@@ -1982,8 +2000,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_loss(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_may_undo(tp)) {
 		struct sk_buff *skb;
 		tcp_for_write_queue(skb, sk) {
@@ -1994,7 +2014,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 
 		clear_all_retrans_hints(tp);
 
-		DBGUNDO(sk, tp, "partial loss");
+		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tp->left_out = tp->sacked_out;
 		tcp_undo_cwr(sk, 1);
@@ -2016,8 +2036,10 @@ static inline void tcp_complete_cwr(struct sock *sk)
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
-static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tp->left_out = tp->sacked_out;
 
 	if (tp->retrans_out == 0)
@@ -2111,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	    before(tp->snd_una, tp->high_seq) &&
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
+		tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
 	}
 
@@ -2127,7 +2149,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Loss:
 		icsk->icsk_retransmits = 0;
-		if (tcp_try_undo_recovery(sk, tp))
+		if (tcp_try_undo_recovery(sk))
 			return;
 		break;
 
@@ -2141,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		break;
 
 	case TCP_CA_Disorder:
-		tcp_try_undo_dsack(sk, tp);
+		tcp_try_undo_dsack(sk);
 		if (!tp->undo_marker ||
 		    /* For SACK case do not Open to allow to undo
 		     * catching for all duplicate ACKs. */
@@ -2154,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (IsReno(tp))
 			tcp_reset_reno_sack(tp);
-		if (tcp_try_undo_recovery(sk, tp))
+		if (tcp_try_undo_recovery(sk))
 			return;
 		tcp_complete_cwr(sk);
 		break;
@@ -2170,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
-				tcp_remove_reno_sacks(sk, tp, acked);
-			is_dupack = tcp_try_undo_partial(sk, tp, acked);
+				tcp_remove_reno_sacks(sk, acked);
+			is_dupack = tcp_try_undo_partial(sk, acked);
 		}
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
 			icsk->icsk_retransmits = 0;
-		if (!tcp_try_undo_loss(sk, tp)) {
+		if (!tcp_try_undo_loss(sk)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
 			return;
@@ -2236,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
-	if (is_dupack || tcp_head_timedout(sk, tp))
-		tcp_update_scoreboard(sk, tp);
+	if (is_dupack || tcp_head_timedout(sk))
+		tcp_update_scoreboard(sk);
 	tcp_cwnd_down(sk);
 	tcp_xmit_retransmit_queue(sk);
 }
@@ -2313,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  * RFC2988 recommends to restart timer to now+rto.
  */
 
-static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	} else {
@@ -2471,7 +2495,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
 	if (acked&FLAG_ACKED) {
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
-		tcp_ack_packets_out(sk, tp);
+		tcp_ack_packets_out(sk);
 		if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
 			(*rtt_sample)(sk, tcp_usrtt(&tv));
 
@@ -2556,9 +2580,10 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
-				 struct sk_buff *skb, u32 ack, u32 ack_seq)
+static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+				 u32 ack_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int flag = 0;
 	u32 nwin = ntohs(tcp_hdr(skb)->window);
 
@@ -2576,7 +2601,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 			 * fast path is recovered for sending TCP.
 			 */
 			tp->pred_flags = 0;
-			tcp_fast_path_check(sk, tp);
+			tcp_fast_path_check(sk);
 
 			if (nwin > tp->max_window) {
 				tp->max_window = nwin;
@@ -2762,7 +2787,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	else
 		NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
 
-	flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
+	flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
 	if (TCP_SKB_CB(skb)->sacked)
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3426,7 +3451,7 @@ queue_and_out:
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if (skb->len)
-			tcp_event_data_recv(sk, tp, skb);
+			tcp_event_data_recv(sk, skb);
 		if (th->fin)
 			tcp_fin(skb, sk, th);
 
@@ -3443,7 +3468,7 @@ queue_and_out:
 	if (tp->rx_opt.num_sacks)
 		tcp_sack_remove(tp);
 
-	tcp_fast_path_check(sk, tp);
+	tcp_fast_path_check(sk);
 
 	if (eaten > 0)
 		__kfree_skb(skb);
@@ -3734,7 +3759,7 @@ static int tcp_prune_queue(struct sock *sk)
 	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-		tcp_clamp_window(sk, tp);
+		tcp_clamp_window(sk);
 	else if (tcp_memory_pressure)
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
@@ -3803,8 +3828,10 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	/* If the user specified a specific send buffer setting, do
 	 * not modify it.
 	 */
@@ -3836,7 +3863,7 @@ static void tcp_new_space(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tcp_should_expand_sndbuf(sk, tp)) {
+	if (tcp_should_expand_sndbuf(sk)) {
 		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
 			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
 		    demanded = max_t(unsigned int, tp->snd_cwnd,
@@ -3860,9 +3887,9 @@ static void tcp_check_space(struct sock *sk)
 	}
 }
 
-static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk)
 {
-	tcp_push_pending_frames(sk, tp);
+	tcp_push_pending_frames(sk);
 	tcp_check_space(sk);
 }
 
@@ -4196,7 +4223,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 */
 			tcp_ack(sk, skb, 0);
 			__kfree_skb(skb);
-			tcp_data_snd_check(sk, tp);
+			tcp_data_snd_check(sk);
 			return 0;
 		} else { /* Header too small */
 			TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -4267,12 +4294,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 			}
 
-			tcp_event_data_recv(sk, tp, skb);
+			tcp_event_data_recv(sk, skb);
 
 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
 				/* Well, only one small jumplet in fast path... */
 				tcp_ack(sk, skb, FLAG_DATA);
-				tcp_data_snd_check(sk, tp);
+				tcp_data_snd_check(sk);
 				if (!inet_csk_ack_scheduled(sk))
 					goto no_ack;
 			}
@@ -4355,7 +4382,7 @@ step5:
 	/* step 7: process the segment text */
 	tcp_data_queue(sk, skb);
 
-	tcp_data_snd_check(sk, tp);
+	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
 	return 0;
 
@@ -4672,7 +4699,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			/* Do step6 onward by hand. */
 			tcp_urg(sk, skb, th);
 			__kfree_skb(skb);
-			tcp_data_snd_check(sk, tp);
+			tcp_data_snd_check(sk);
 			return 0;
 		}
 
@@ -4864,7 +4891,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	/* tcp_data could move socket to TIME-WAIT */
 	if (sk->sk_state != TCP_CLOSE) {
-		tcp_data_snd_check(sk, tp);
+		tcp_data_snd_check(sk);
 		tcp_ack_snd_check(sk);
 	}
 
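
Every hunk above applies the same mechanical conversion: a helper that used to take both struct sock *sk and struct tcp_sock *tp now takes only sk and recovers tp locally with tcp_sk(sk), which is a plain pointer cast of the same socket object. A minimal standalone sketch of the idiom follows; the struct layouts and the packets_out check are simplified stand-ins for illustration, not the real kernel definitions.

	#include <stdio.h>

	/* Simplified stand-ins: in the kernel, struct tcp_sock begins
	 * (transitively) with struct sock, so both pointers alias the
	 * same object. */
	struct sock { int sk_state; };
	struct tcp_sock { struct sock sk; unsigned int packets_out; };

	/* tcp_sk() is just a cast, so deriving tp from sk inside each
	 * helper costs nothing at runtime. */
	static inline struct tcp_sock *tcp_sk(struct sock *sk)
	{
		return (struct tcp_sock *)sk;
	}

	/* Post-conversion shape: callers pass only sk ... */
	static int tcp_head_timedout(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);	/* ... tp recovered locally */

		return tp->packets_out != 0;
	}

	int main(void)
	{
		struct tcp_sock tsk = { .packets_out = 3 };

		printf("head timedout candidate: %d\n", tcp_head_timedout(&tsk.sk));
		return 0;
	}

Because the cast is free and the compiler inlines it, dropping the redundant tp argument shrinks every call site without changing generated code in any meaningful way.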