@@ -665,7 +665,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 					int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 xmit_size_goal;
+	u32 xmit_size_goal, old_size_goal;
 
 	xmit_size_goal = mss_now;
 
@@ -676,7 +676,17 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				  tp->tcp_header_len);
 
 		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
+
+		/* We try hard to avoid divides here */
+		old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+		if (likely(old_size_goal <= xmit_size_goal &&
+			   old_size_goal + mss_now > xmit_size_goal)) {
+			xmit_size_goal = old_size_goal;
+		} else {
+			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+		}
 	}
 
 	return xmit_size_goal;