|
@@ -2531,7 +2531,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
|
|
|
if (!*isidle || rdp->rsp != rcu_sysidle_state ||
|
|
|
cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
|
|
|
return;
|
|
|
- /* WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu); */
|
|
|
+ if (rcu_gp_in_progress(rdp->rsp))
|
|
|
+ WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
|
|
|
|
|
|
/* Pick up current idle and NMI-nesting counter and check. */
|
|
|
cur = atomic_read(&rdtp->dynticks_idle);
|
|
@@ -2556,6 +2557,20 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
|
|
|
return rsp == rcu_sysidle_state;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Bind the grace-period kthread for the sysidle flavor of RCU to the
|
|
|
+ * timekeeping CPU, on which rcu_sysidle_check_cpu() expects to run.
|
|
|
+ */
|
|
|
+static void rcu_bind_gp_kthread(void)
|
|
|
+{
|
|
|
+	int cpu = ACCESS_ONCE(tick_do_timer_cpu); /* one-shot snapshot of the timekeeper */
|
|
|
+
|
|
|
+	if (cpu < 0 || cpu >= nr_cpu_ids)
|
|
|
+		return; /* No valid timekeeping CPU designated yet -- nothing to bind to. */
|
|
|
+	if (raw_smp_processor_id() != cpu)
|
|
|
+		set_cpus_allowed_ptr(current, cpumask_of(cpu)); /* migrate ourselves there */
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Return a delay in jiffies based on the number of CPUs, rcu_node
|
|
|
* leaf fanout, and jiffies tick rate. The idea is to allow larger
|
|
@@ -2766,6 +2781,10 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
+static void rcu_bind_gp_kthread(void)
|
|
|
+{ /* No-op: presumably the !CONFIG_NO_HZ_FULL_SYSIDLE stub -- no timekeeping CPU to bind to. */
|
|
|
+}
|
|
|
+
|
|
|
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
|
|
|
unsigned long maxj)
|
|
|
{
|