|
@@ -1448,13 +1448,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
|
|
|
if (skb->len > (dev->mtu + dev->hard_header_len))
|
|
|
return NET_RX_DROP;
|
|
|
|
|
|
- skb_dst_drop(skb);
|
|
|
+ skb_set_dev(skb, dev);
|
|
|
skb->tstamp.tv64 = 0;
|
|
|
skb->pkt_type = PACKET_HOST;
|
|
|
skb->protocol = eth_type_trans(skb, dev);
|
|
|
- skb->mark = 0;
|
|
|
- secpath_reset(skb);
|
|
|
- nf_reset(skb);
|
|
|
return netif_rx(skb);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(dev_forward_skb);
|
|
@@ -1614,6 +1611,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * skb_set_dev -- assign a new device to a buffer
|
|
|
+ * @skb: buffer for the new device
|
|
|
+ * @dev: network device
|
|
|
+ *
|
|
|
+ * If an skb is owned by a device already, we have to reset
|
|
|
+ * all data private to the namespace a device belongs to
|
|
|
+ * before assigning it a new device.
|
|
|
+ */
|
|
|
+#ifdef CONFIG_NET_NS
|
|
|
+void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
|
|
|
+{
|
|
|
+ skb_dst_drop(skb);
|
|
|
+ if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
|
|
|
+ secpath_reset(skb);
|
|
|
+ nf_reset(skb);
|
|
|
+ skb_init_secmark(skb);
|
|
|
+ skb->mark = 0;
|
|
|
+ skb->priority = 0;
|
|
|
+ skb->nf_trace = 0;
|
|
|
+ skb->ipvs_property = 0;
|
|
|
+#ifdef CONFIG_NET_SCHED
|
|
|
+ skb->tc_index = 0;
|
|
|
+#endif
|
|
|
+ }
|
|
|
+ skb->dev = dev;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(skb_set_dev);
|
|
|
+#endif /* CONFIG_NET_NS */
|
|
|
+
|
|
|
/*
|
|
|
* Invalidate hardware checksum when packet is to be mangled, and
|
|
|
* complete checksum manually on outgoing path.
|