@@ -890,30 +890,31 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
-		__skb_queue_tail(&tp->ucopy.prequeue, skb);
-		tp->ucopy.memory += skb->truesize;
-		if (tp->ucopy.memory > sk->sk_rcvbuf) {
-			struct sk_buff *skb1;
-
-			BUG_ON(sock_owned_by_user(sk));
-
-			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk_backlog_rcv(sk, skb1);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
-			}
-
-			tp->ucopy.memory = 0;
-		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-			wake_up_interruptible(sk->sk_sleep);
-			if (!inet_csk_ack_scheduled(sk))
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  (3 * TCP_RTO_MIN) / 4,
-							  TCP_RTO_MAX);
+	if (sysctl_tcp_low_latency || !tp->ucopy.task)
+		return 0;
+
+	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+	tp->ucopy.memory += skb->truesize;
+	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+		struct sk_buff *skb1;
+
+		BUG_ON(sock_owned_by_user(sk));
+
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+			sk_backlog_rcv(sk, skb1);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPPREQUEUEDROPPED);
 		}
-		return 1;
+
+		tp->ucopy.memory = 0;
+	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+		wake_up_interruptible(sk->sk_sleep);
+		if (!inet_csk_ack_scheduled(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+						  (3 * TCP_RTO_MIN) / 4,
+						  TCP_RTO_MAX);
 	}
-	return 0;
+	return 1;
 }
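For reference, a minimal standalone sketch (hypothetical names, not kernel code) of the transformation the hunk applies: the guard condition is inverted via De Morgan's law and returns early, which removes one level of indentation from the main path while preserving the original return values (0 when the prequeue path is skipped, 1 when the skb is queued).

#include <stdbool.h>

/* Before: the whole body nests inside the condition. */
static int enqueue_nested(bool low_latency, bool have_task)
{
	if (!low_latency && have_task) {
		/* ... queue the packet ... */
		return 1;
	}
	return 0;
}

/* After: invert the condition, bail out early, unindent the main path. */
static int enqueue_early_return(bool low_latency, bool have_task)
{
	if (low_latency || !have_task)
		return 0;

	/* ... queue the packet ... */
	return 1;
}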