@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-	WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+	     KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
 	if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* Now that we have two receive queues this
 			 * shouldn't happen.
 			 */
-			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
-				printk(KERN_INFO "recvmsg bug: copied %X "
-				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+				 KERN_INFO "recvmsg bug: copied %X "
+				 "seq %X rcvnxt %X fl %X\n", *seq,
+				 TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+				 flags))
 				break;
-			}
+
 			offset = *seq - TCP_SKB_CB(skb)->seq;
 			if (tcp_hdr(skb)->syn)
 				offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
 			WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-				"copied %X seq %X\n", *seq,
-				TCP_SKB_CB(skb)->seq);
+				"copied %X seq %X rcvnxt %X fl %X\n",
+				*seq, TCP_SKB_CB(skb)->seq,
+				tp->rcv_nxt, flags);
 		}
 
 		/* Well, if we have backlog, try to process it now yet. */
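
For reference, the reason the patch can fold the old printk() into the if () is that the kernel's WARN(condition, fmt, ...) macro not only prints the formatted message (plus a backtrace) when the condition is true, it also evaluates to the condition itself, so it can gate a break just like the original test did. Below is a minimal userspace sketch of that contract; warn_ret() is a hypothetical stand-in written for this example, not the kernel macro.

/*
 * Userspace sketch (not kernel code): warn_ret() mimics the WARN() contract,
 * printing when the condition is true and evaluating to that condition so it
 * can drive control flow.  Build with gcc (uses GNU statement expressions).
 */
#include <stdio.h>

#define warn_ret(cond, fmt, ...) ({					\
	int __w = !!(cond);						\
	if (__w)							\
		fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);	\
	__w;								\
})

int main(void)
{
	unsigned int copied_seq = 0x10;	/* what has been copied so far */
	unsigned int skb_seq = 0x20;	/* first byte of the queued skb */

	for (;;) {
		/* Same shape as the patched recvmsg loop: warn, then bail out. */
		if (warn_ret(copied_seq < skb_seq,
			     "recvmsg bug: copied %X seq %X\n",
			     copied_seq, skb_seq))
			break;
		/* normal processing would continue here */
	}
	return 0;
}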