@@ -256,7 +256,6 @@ struct sock {
 		struct sk_buff *head;
 		struct sk_buff *tail;
 		int len;
-		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
@@ -608,10 +607,20 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/*
+ * Take into account size of receive queue and backlog queue
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+	return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
 /* The per-socket spinlock must be held here. */
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+	if (sk_rcvqueues_full(sk, skb))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
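
Usage note (a sketch, not part of the patch): the hunks above change the
backlog limit from the old per-socket sk_backlog.limit to a combined budget,
receive queue (sk_rmem_alloc) plus backlog (sk_backlog.len) measured against
sk_rcvbuf. The sketch below shows how a protocol's softirq receive path is
expected to combine the two helpers: an early unlocked sk_rcvqueues_full()
check drops packets before taking the socket lock, and the locked
sk_add_backlog() call re-applies the same limit, since the queues may have
grown in between. proto_do_rcv() is a hypothetical stand-in for the real
per-protocol handler (in the same spirit as udp_queue_rcv_skb()).

static int proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	/* Fast early drop: receive queue + backlog already exceed
	 * sk_rcvbuf, so queueing this skb would only waste memory. */
	if (sk_rcvqueues_full(sk, skb)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Socket not held by a process: process the skb now. */
		rc = proto_do_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb)) {
		/* Queues filled up since the early check: drop. */
		kfree_skb(skb);
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);

	return rc;
}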