@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  * reference count dropping and cleans up the skbuff as if it
  * just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
 
 	if (irqs_disabled())
-		return 0;
+		return false;
 
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return 0;
+		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return 0;
+		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
-		return 0;
+		return false;
 
 	skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
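The only change callers see in these two hunks is the return type: skb_recycle_check() now answers the "can this skb be reused?" question with a bool rather than 0/1. As a rough sketch of the intended call site, a NIC driver that recycles transmitted buffers onto its RX ring might use it as below; struct example_priv, its recycle_queue and rx_buffer_size are made-up names for illustration only, not part of this patch:

struct example_priv {				/* hypothetical driver private data */
	struct sk_buff_head recycle_queue;	/* skbs waiting to be reused for RX */
	unsigned int rx_buffer_size;		/* size an RX buffer must provide */
};

/* TX completion: try to keep the skb for RX reuse instead of freeing it */
static void example_tx_clean(struct example_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buffer_size))
		__skb_queue_head(&priv->recycle_queue, skb);	/* reuse on next RX refill */
	else
		dev_kfree_skb_any(skb);				/* cannot recycle, free normally */
}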
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		struct skb_shared_hwtstamps *hwtstamps)
 {
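On the queueing side the new helper charges skb->truesize against sk->sk_rmem_alloc and installs sock_rmem_free() as the destructor, so the charge is released automatically when the error skb is eventually freed; as the comment notes, sk_forward_alloc is deliberately left untouched for error packets. The consuming side is sketched very roughly below (the in-tree readers are ip_recv_error() and ipv6_recv_error(); this fragment only illustrates the accounting lifecycle, it is not the actual code):

	struct sk_buff *skb = skb_dequeue(&sk->sk_error_queue);

	if (skb) {
		/* ... copy the sock_extended_err report to the caller ... */
		kfree_skb(skb);		/* destructor sock_rmem_free() undoes the
					 * sk_rmem_alloc charge added at queueing time */
	}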
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	memset(serr, 0, sizeof(*serr));
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
 	err = sock_queue_err_skb(sk, skb);
+
 	if (err)
 		kfree_skb(skb);
 }
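skb_tstamp_tx() is one producer for this queue: it wraps the transmit timestamp in an SO_EE_ORIGIN_TIMESTAMPING report and hands it to sock_queue_err_skb(), from where userspace collects it with recvmsg(MSG_ERRQUEUE). A rough userspace sketch of that read-back path follows; the function name, buffer sizes and fallback defines are illustrative and assume the socket already has SO_TIMESTAMPING enabled:

#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <linux/errqueue.h>	/* struct scm_timestamping */

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* fallback for older libc headers */
#endif
#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif

/* Pull one TX timestamp report off the socket error queue. Illustrative
 * only: assumes SO_TIMESTAMPING has already been enabled on fd and omits
 * handling of truncated control data. */
static int read_tx_timestamp(int fd, struct scm_timestamping *tss)
{
	char data[256], control[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			memcpy(tss, CMSG_DATA(cmsg), sizeof(*tss));
			return 0;	/* software/hardware timestamps now in *tss */
		}
	}
	return -1;	/* no timestamp control message found */
}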