|
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
|
|
}
|
|
}
|
|
EXPORT_SYMBOL_GPL(skb_cow_data);
|
|
EXPORT_SYMBOL_GPL(skb_cow_data);
|
|
|
|
|
|
|
|
/*
 * skb destructor paired with sock_queue_err_skb(): releases the
 * receive-buffer space the skb was charged against sk_rmem_alloc.
 * Note: no sk_forward_alloc accounting is undone here, because error
 * packets are deliberately not mem-charged (see sock_queue_err_skb()).
 */
static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
|
|
|
|
+
|
|
|
|
/*
 * Queue @skb onto @sk's error queue, charging only sk_rmem_alloc.
 * Note: we don't mem charge error packets (no sk_forward_alloc changes).
 *
 * Returns 0 on success, or -ENOMEM if the socket receive buffer limit
 * would be exceeded.  On failure the caller retains ownership of @skb.
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Reject if accepting this skb would push the receive-buffer
	 * charge past sk_rcvbuf.
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	/* Detach the skb from any previous owner before charging it to
	 * this socket; sock_rmem_free undoes the atomic_add below when
	 * the skb is freed.
	 */
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		/* presumably wakes readers blocked on the socket — the
		 * callback's semantics are defined elsewhere */
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
|
|
|
|
+
|
|
void skb_tstamp_tx(struct sk_buff *orig_skb,
|
|
void skb_tstamp_tx(struct sk_buff *orig_skb,
|
|
struct skb_shared_hwtstamps *hwtstamps)
|
|
struct skb_shared_hwtstamps *hwtstamps)
|
|
{
|
|
{
|
|
@@ -2997,9 +3025,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
|
|
serr->ee.ee_errno = ENOMSG;
|
|
serr->ee.ee_errno = ENOMSG;
|
|
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
|
|
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
|
|
|
|
|
|
- bh_lock_sock(sk);
|
|
|
|
err = sock_queue_err_skb(sk, skb);
|
|
err = sock_queue_err_skb(sk, skb);
|
|
- bh_unlock_sock(sk);
|
|
|
|
|
|
|
|
if (err)
|
|
if (err)
|
|
kfree_skb(skb);
|
|
kfree_skb(skb);
|