@@ -2259,69 +2259,41 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
-#ifdef CONFIG_RPS
-
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-
 /*
- * get_rps_cpu is called from netif_receive_skb and returns the target
- * CPU from the RPS map of the receiving queue for a given skb.
- * rcu_read_lock must be held on entry.
+ * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * and src/dst port numbers. Returns a non-zero hash number on success
+ * and 0 on failure.
  */
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
-		       struct rps_dev_flow **rflowp)
+__u32 __skb_get_rxhash(struct sk_buff *skb)
 {
+	int nhoff, hash = 0;
 	struct ipv6hdr *ip6;
 	struct iphdr *ip;
-	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map;
-	struct rps_dev_flow_table *flow_table;
-	struct rps_sock_flow_table *sock_flow_table;
-	int cpu = -1;
 	u8 ip_proto;
-	u16 tcpu;
 	u32 addr1, addr2, ihl;
 	union {
 		u32 v32;
 		u16 v16[2];
 	} ports;
 
-	if (skb_rx_queue_recorded(skb)) {
-		u16 index = skb_get_rx_queue(skb);
-		if (unlikely(index >= dev->num_rx_queues)) {
-			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
-				"on queue %u, but number of RX queues is %u\n",
-				dev->name, index, dev->num_rx_queues);
-			goto done;
-		}
-		rxqueue = dev->_rx + index;
-	} else
-		rxqueue = dev->_rx;
-
-	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
-		goto done;
-
-	if (skb->rxhash)
-		goto got_hash; /* Skip hash computation on packet header */
+	nhoff = skb_network_offset(skb);
 
 	switch (skb->protocol) {
 	case __constant_htons(ETH_P_IP):
-		if (!pskb_may_pull(skb, sizeof(*ip)))
+		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 			goto done;
 
-		ip = (struct iphdr *) skb->data;
+		ip = (struct iphdr *)(skb->data + nhoff);
 		ip_proto = ip->protocol;
 		addr1 = (__force u32) ip->saddr;
 		addr2 = (__force u32) ip->daddr;
 		ihl = ip->ihl;
 		break;
 	case __constant_htons(ETH_P_IPV6):
-		if (!pskb_may_pull(skb, sizeof(*ip6)))
+		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
 			goto done;
 
-		ip6 = (struct ipv6hdr *) skb->data;
+		ip6 = (struct ipv6hdr *)(skb->data + nhoff);
 		ip_proto = ip6->nexthdr;
 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
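
This hunk moves the flow-hash computation out of get_rps_cpu() into the new
__skb_get_rxhash() helper, offsetting every header access by
nhoff = skb_network_offset(skb) so the helper also works when skb->data is
not already positioned at the network header. For IPv6, only the low 32 bits
of each address (s6_addr32[3]) feed the hash. The sketch below is a minimal
userspace model of the resulting symmetric flow hash; mix_3words() is a
stand-in for the kernel's jhash_3words(), and all names here are
illustrative, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	/* Stand-in for the kernel's jhash_3words(); any reasonable
	 * three-word mixer illustrates the point. */
	static uint32_t mix_3words(uint32_t a, uint32_t b, uint32_t c,
				   uint32_t seed)
	{
		a ^= seed; a *= 0x9e3779b1u;
		b ^= a;    b = (b << 13) | (b >> 19);
		c ^= b;    c *= 0x85ebca6bu;
		return a ^ b ^ c;
	}

	static uint32_t flow_hash(uint32_t addr1, uint32_t addr2,
				  uint16_t sport, uint16_t dport, uint32_t seed)
	{
		union { uint32_t v32; uint16_t v16[2]; } ports;
		uint32_t hash;

		ports.v16[0] = sport;
		ports.v16[1] = dport;
		if (ports.v16[1] < ports.v16[0]) {	/* order the port pair */
			uint16_t tmp = ports.v16[0];
			ports.v16[0] = ports.v16[1];
			ports.v16[1] = tmp;
		}
		if (addr2 < addr1) {			/* order the addresses */
			uint32_t tmp = addr1;
			addr1 = addr2;
			addr2 = tmp;
		}
		hash = mix_3words(addr1, addr2, ports.v32, seed);
		return hash ? hash : 1;	/* 0 is reserved for "no hash" */
	}

	int main(void)
	{
		/* both directions of one flow produce the same hash */
		printf("%08" PRIx32 "\n",
		       flow_hash(0x0a000001, 0x0a000002, 1234, 80, 0x5eed));
		printf("%08" PRIx32 "\n",
		       flow_hash(0x0a000002, 0x0a000001, 80, 1234, 0x5eed));
		return 0;
	}

Ordering the addresses and the port pair before mixing is what the
"consistent hash" comment in the last hunk refers to: both directions of a
connection land on the same hash, and therefore on the same RPS CPU.
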
@@ -2330,6 +2302,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	default:
 		goto done;
 	}
+
 	switch (ip_proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
@@ -2338,8 +2311,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	case IPPROTO_AH:
 	case IPPROTO_SCTP:
 	case IPPROTO_UDPLITE:
-		if (pskb_may_pull(skb, (ihl * 4) + 4)) {
-			ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+		if (pskb_may_pull(skb, (ihl * 4) + 4 + nhoff)) {
+			ports.v32 = * (__force u32 *) (skb->data + nhoff +
+						       (ihl * 4));
 			if (ports.v16[1] < ports.v16[0])
 				swap(ports.v16[0], ports.v16[1]);
 			break;
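
For the protocols listed here, the four bytes just past the IP header are
usable as a flow discriminator: TCP, UDP, UDP-Lite and SCTP all begin with
the 16-bit source and destination ports, and for the remaining protocols the
same word is simply treated as an opaque flow identifier. A single 32-bit
load therefore covers them all, and the u32/u16[2] union lets the two halves
be compared and swapped in place so that both flow directions yield the same
word. An illustrative userspace sketch (port_pair() is a hypothetical name,
not part of the patch):

	#include <stdint.h>
	#include <string.h>

	uint32_t port_pair(const uint8_t *l4hdr)
	{
		union {
			uint32_t v32;
			uint16_t v16[2];
		} ports;

		memcpy(&ports.v32, l4hdr, 4);	/* sport(16) | dport(16) */
		if (ports.v16[1] < ports.v16[0]) {
			/* swap so {a,b} and {b,a} compare equal */
			uint16_t tmp = ports.v16[0];
			ports.v16[0] = ports.v16[1];
			ports.v16[1] = tmp;
		}
		return ports.v32;
	}

The kernel reads the word directly from skb->data after the pskb_may_pull()
bounds check ((ihl * 4) bytes of IP header plus 4 bytes of ports, all offset
by nhoff); the memcpy here is just the portable userspace equivalent.
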
@@ -2352,11 +2326,55 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	/* get a consistent hash (same value on both flow directions) */
 	if (addr2 < addr1)
 		swap(addr1, addr2);
-	skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
-	if (!skb->rxhash)
-		skb->rxhash = 1;
 
-got_hash:
+	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+	if (!hash)
+		hash = 1;
+
+done:
+	return hash;
+}
+EXPORT_SYMBOL(__skb_get_rxhash);
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		       struct rps_dev_flow **rflowp)
+{
+	struct netdev_rx_queue *rxqueue;
+	struct rps_map *map;
+	struct rps_dev_flow_table *flow_table;
+	struct rps_sock_flow_table *sock_flow_table;
+	int cpu = -1;
+	u16 tcpu;
+
+	if (skb_rx_queue_recorded(skb)) {
+		u16 index = skb_get_rx_queue(skb);
+		if (unlikely(index >= dev->num_rx_queues)) {
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
+			goto done;
+		}
+		rxqueue = dev->_rx + index;
+	} else
+		rxqueue = dev->_rx;
+
+	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
+		goto done;
+
+	if (!skb_get_rxhash(skb))
+		goto done;
+
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
 	if (flow_table && sock_flow_table) {
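
get_rps_cpu() now calls skb_get_rxhash() instead of hashing inline. That
wrapper is added to include/linux/skbuff.h by the same patch (not shown in
this excerpt) and caches the result in skb->rxhash, so the hash is computed
at most once per packet; its shape is along these lines:

	static inline __u32 skb_get_rxhash(struct sk_buff *skb)
	{
		if (!skb->rxhash)	/* 0 means "not computed yet" */
			skb->rxhash = __skb_get_rxhash(skb);

		return skb->rxhash;
	}

This caching is also why __skb_get_rxhash() bumps a zero jhash result to 1:
a legitimate hash of 0 would otherwise be recomputed for every consumer.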