@@ -3155,6 +3155,23 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
+/*
+ * Limit the use of PFMEMALLOC reserves to those protocols that implement
+ * the special handling of PFMEMALLOC skbs.
+ */
+static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_ARP):
+	case __constant_htons(ETH_P_IP):
+	case __constant_htons(ETH_P_IPV6):
+	case __constant_htons(ETH_P_8021Q):
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
@@ -3164,14 +3181,27 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	bool deliver_exact = false;
 	int ret = NET_RX_DROP;
 	__be16 type;
+	unsigned long pflags = current->flags;
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
 	trace_netif_receive_skb(skb);
 
+	/*
+	 * PFMEMALLOC skbs are special, they should
+	 * - be delivered to SOCK_MEMALLOC sockets only
+	 * - stay away from userspace
+	 * - have bounded memory usage
+	 *
+	 * Use PF_MEMALLOC as this saves us from propagating the allocation
+	 * context down to all allocation sites.
+	 */
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+		current->flags |= PF_MEMALLOC;
+
 	/* if we've gotten here through NAPI, check netpoll */
 	if (netpoll_receive_skb(skb))
-		return NET_RX_DROP;
+		goto out;
 
 	orig_dev = skb->dev;
 
@@ -3191,7 +3221,7 @@ another_round:
 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 		skb = vlan_untag(skb);
 		if (unlikely(!skb))
-			goto out;
+			goto unlock;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3201,6 +3231,9 @@ another_round:
 	}
 #endif
 
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+		goto skip_taps;
+
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
 		if (!ptype->dev || ptype->dev == skb->dev) {
 			if (pt_prev)
@@ -3209,13 +3242,18 @@ another_round:
 		}
 	}
 
+skip_taps:
 #ifdef CONFIG_NET_CLS_ACT
 	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 	if (!skb)
-		goto out;
+		goto unlock;
 ncls:
 #endif
 
+	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
+				&& !skb_pfmemalloc_protocol(skb))
+		goto drop;
+
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (vlan_tx_tag_present(skb)) {
 		if (pt_prev) {
@@ -3225,7 +3263,7 @@ ncls:
 		if (vlan_do_receive(&skb, !rx_handler))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto out;
+			goto unlock;
 	}
 
 	if (rx_handler) {
@@ -3235,7 +3273,7 @@ ncls:
 		}
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
-			goto out;
+			goto unlock;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3268,6 +3306,7 @@ ncls:
 		else
 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
+drop:
 		atomic_long_inc(&skb->dev->rx_dropped);
 		kfree_skb(skb);
 		/* Jamal, now you will not able to escape explaining
@@ -3276,8 +3315,10 @@ ncls:
 		ret = NET_RX_DROP;
 	}
 
-out:
+unlock:
 	rcu_read_unlock();
+out:
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 	return ret;
 }
 
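
Note on the new out: label: tsk_restore_flags(current, pflags, PF_MEMALLOC) only puts the PF_MEMALLOC bit back to the value saved in pflags at function entry; any other bits of current->flags that changed during receive processing are left alone. Below is a minimal userspace sketch of that masked save/restore pattern, assuming an illustrative restore_flags() helper and flag value rather than the kernel's own definitions.

/*
 * Sketch of the masked flag restore used at the out: label above.
 * restore_flags() and the constants here are illustrative only.
 */
#include <stdio.h>

#define PF_MEMALLOC	0x0800	/* illustrative bit, mirrors the kernel value */

static void restore_flags(unsigned long *flags, unsigned long saved,
			  unsigned long mask)
{
	/* clear the masked bit(s), then copy them back from the saved copy */
	*flags &= ~mask;
	*flags |= saved & mask;
}

int main(void)
{
	unsigned long flags = 0x0040;	/* some unrelated flag already set */
	unsigned long saved = flags;	/* like: pflags = current->flags */

	flags |= PF_MEMALLOC;		/* like: current->flags |= PF_MEMALLOC */
	flags |= 0x0001;		/* some other bit changes meanwhile */

	restore_flags(&flags, saved, PF_MEMALLOC);

	/* PF_MEMALLOC is cleared again, the other bits are untouched */
	printf("flags = %#lx\n", flags);	/* prints 0x41 */
	return 0;
}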