@@ -4484,7 +4484,24 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	end_seq = TCP_SKB_CB(skb)->end_seq;

 	if (seq == TCP_SKB_CB(skb1)->end_seq) {
-		__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+		/* Packets in ofo can stay in queue a long time.
+		 * Better try to coalesce them right now
+		 * to avoid future tcp_collapse_ofo_queue(),
+		 * probably the most expensive function in tcp stack.
+		 */
+		if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPRCVCOALESCE);
+			BUG_ON(skb_copy_bits(skb, 0,
+					     skb_put(skb1, skb->len),
+					     skb->len));
+			TCP_SKB_CB(skb1)->end_seq = end_seq;
+			TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+			__kfree_skb(skb);
+			skb = NULL;
+		} else {
+			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+		}

 		if (!tp->rx_opt.num_sacks ||
 		    tp->selective_acks[0].end_seq != seq)
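
For context, this hunk replaces the unconditional __skb_queue_after() with an attempt to merge the new skb into the previous out-of-order skb: when the new segment starts exactly where skb1 ends and its payload fits in skb1's spare tailroom, the bytes are copied in, skb1's end_seq/ack_seq bookkeeping is updated, and the absorbed skb is freed. Below is a minimal user-space sketch of that coalescing idea; struct seg_buf, tailroom() and try_coalesce() are illustrative names only, not kernel API.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>

/* Illustrative stand-in for a queued out-of-order segment: a fixed
 * buffer holding 'len' payload bytes covering [seq, end_seq). */
struct seg_buf {
	uint32_t seq;
	uint32_t end_seq;
	size_t   len;
	char     data[256];
};

static size_t tailroom(const struct seg_buf *b)
{
	return sizeof(b->data) - b->len;
}

/* Append the new payload to 'prev' when it is contiguous and fits in
 * the spare tailroom, so the queue keeps one entry instead of two --
 * the effect the patch aims for in the ofo queue. */
static bool try_coalesce(struct seg_buf *prev,
			 uint32_t seq, const char *payload, size_t len)
{
	if (seq != prev->end_seq || len > tailroom(prev))
		return false;	/* not contiguous, or does not fit */

	memcpy(prev->data + prev->len, payload, len);
	prev->len += len;
	prev->end_seq += (uint32_t)len;
	return true;
}

int main(void)
{
	struct seg_buf prev = { .seq = 1000, .end_seq = 1005, .len = 5 };

	memcpy(prev.data, "hello", 5);

	if (try_coalesce(&prev, 1005, " world", 6))
		printf("coalesced: [%" PRIu32 ",%" PRIu32 ") len=%zu\n",
		       prev.seq, prev.end_seq, prev.len);
	else
		printf("queued separately\n");

	return 0;
}

The kernel hunk additionally refuses to merge a segment carrying a FIN (the !tcp_hdr(skb)->fin test), presumably because only payload bytes are copied into skb1, so the FIN indication carried by the absorbed skb would otherwise be lost.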