@@ -934,7 +934,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
 
 	sched = !sock_flag(other, SOCK_DEAD) &&
 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
-		(skb_queue_len(&other->sk_receive_queue) >=
+		(skb_queue_len(&other->sk_receive_queue) >
 		 other->sk_max_ack_backlog);
 
 	unix_state_runlock(other);
@@ -1008,7 +1008,7 @@ restart:
 	if (other->sk_state != TCP_LISTEN)
 		goto out_unlock;
 
-	if (skb_queue_len(&other->sk_receive_queue) >=
+	if (skb_queue_len(&other->sk_receive_queue) >
 	    other->sk_max_ack_backlog) {
 		err = -EAGAIN;
 		if (!timeo)
@@ -1381,7 +1381,7 @@ restart:
 	}
 
 	if (unix_peer(other) != sk &&
-	    (skb_queue_len(&other->sk_receive_queue) >=
+	    (skb_queue_len(&other->sk_receive_queue) >
 	     other->sk_max_ack_backlog)) {
		if (!timeo) {
			err = -EAGAIN;
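
A minimal standalone sketch of what these hunks change, separate from the patch itself: with the previous ">=" comparison a listener whose receive queue length has reached sk_max_ack_backlog already counts as full, while the relaxed ">" comparison admits one more pending connection before the connect path waits or returns -EAGAIN. The helper names and the simulation below are illustrative assumptions compiled as ordinary userspace C, not kernel code.

	#include <stdio.h>

	/*
	 * Illustrative model only: mirrors the backlog comparison toggled by
	 * the hunks above, using plain integers instead of sk_buff queues.
	 */
	static int recvq_full_old(unsigned int qlen, unsigned int max_backlog)
	{
		return qlen >= max_backlog;	/* old check: full once qlen reaches the cap */
	}

	static int recvq_full_new(unsigned int qlen, unsigned int max_backlog)
	{
		return qlen > max_backlog;	/* new check: one extra pending connect allowed */
	}

	int main(void)
	{
		unsigned int max_backlog = 5;
		unsigned int admitted_old = 0, admitted_new = 0;
		unsigned int qlen;

		/* Count connect attempts each variant admits before reporting "full". */
		for (qlen = 0; !recvq_full_old(qlen, max_backlog); qlen++)
			admitted_old++;
		for (qlen = 0; !recvq_full_new(qlen, max_backlog); qlen++)
			admitted_new++;

		printf("max_ack_backlog=%u: old '>=' admits %u, new '>' admits %u\n",
		       max_backlog, admitted_old, admitted_new);
		return 0;
	}

With max_backlog set to 5 this prints that the old check admits 5 pending connections and the new check admits 6, i.e. the change allows the queue to grow one entry past the configured backlog before refusing.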