@@ -2711,6 +2711,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+struct jump_label_key rps_needed __read_mostly;
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	    struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2994,7 +2996,7 @@ int netif_rx(struct sk_buff *skb)
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	{
+	if (static_branch(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3009,14 +3011,13 @@ int netif_rx(struct sk_buff *skb)
 
 		rcu_read_unlock();
 		preempt_enable();
-	}
-#else
+	} else
+#endif
 	{
 		unsigned int qtail;
 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
 		put_cpu();
 	}
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(netif_rx);
@@ -3359,7 +3360,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	{
+	if (static_branch(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
@@ -3370,16 +3371,12 @@ int netif_receive_skb(struct sk_buff *skb)
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
-		} else {
-			rcu_read_unlock();
-			ret = __netif_receive_skb(skb);
+			return ret;
 		}
-
-		return ret;
+		rcu_read_unlock();
 	}
-#else
-	return __netif_receive_skb(skb);
 #endif
+	return __netif_receive_skb(skb);
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
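Why the new test costs nothing when unused: static_branch(&rps_needed) compiles
to a no-op jump until the key is enabled, so hosts that never configure RPS skip
the get_rps_cpu() work in both hot paths. For the branch to fire, the key has to
be incremented and decremented wherever an RPS cpumask is installed or removed.
Below is a minimal sketch of that side, assuming the jump_label API of this
kernel generation (jump_label_inc()/jump_label_dec()); the helper name
rps_map_swap() and its reduced bookkeeping are illustrative, not the actual
sysfs store routine:

	/* Illustrative sketch only: enable the static branch when the first
	 * RPS map is installed and disable it when the last one is removed.
	 * The real sysfs store path does more work (parsing the cpumask
	 * from user space, locking, allocation).
	 */
	static void rps_map_swap(struct netdev_rx_queue *queue,
				 struct rps_map *old_map,
				 struct rps_map *new_map)
	{
		rcu_assign_pointer(queue->rps_map, new_map);

		if (new_map && !old_map)
			jump_label_inc(&rps_needed);	/* first user: branch patched in */
		else if (!new_map && old_map)
			jump_label_dec(&rps_needed);	/* last user: branch patched out */

		if (old_map)
			kfree_rcu(old_map, rcu);
	}

With the key disabled, netif_rx() falls straight into the enqueue_to_backlog()
block and netif_receive_skb() goes directly to __netif_receive_skb(), without
touching any RPS state.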