@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/smpboot.h>
 
 #define RCU_KTHREAD_PRIO 1
 
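[Context, not part of the patch: <linux/smpboot.h> declares the per-CPU hotplug kthread facility that the rest of this patch converts RCU to. In the kernels that introduced it, the descriptor and its registration calls look roughly as sketched below; the exact field set is an assumption and may differ between versions.]

/* Sketch of the interface declared in <linux/smpboot.h>; approximate. */
struct smp_hotplug_thread {
	struct task_struct __percpu	**store;	/* per-CPU slot for each thread's task pointer */
	struct list_head		list;		/* internal bookkeeping */
	int				(*thread_should_run)(unsigned int cpu);
	void				(*thread_fn)(unsigned int cpu);
	void				(*setup)(unsigned int cpu);
	void				(*cleanup)(unsigned int cpu, bool online);
	void				(*park)(unsigned int cpu);
	void				(*unpark)(unsigned int cpu);
	const char			*thread_comm;	/* printf-style thread name, e.g. "rcuc/%u" */
};

int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
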
@@ -1292,25 +1293,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Stop the RCU's per-CPU kthread when its CPU goes offline,.
- */
-static void rcu_stop_cpu_kthread(int cpu)
-{
-	struct task_struct *t;
-
-	/* Stop the CPU's kthread. */
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t != NULL) {
-		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-		kthread_stop(t);
-	}
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1318,59 +1300,22 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument. The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-	int policy;
 	struct sched_param sp;
-	struct task_struct *t;
 
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t == NULL)
-		return;
-	if (to_rt) {
-		policy = SCHED_FIFO;
-		sp.sched_priority = RCU_KTHREAD_PRIO;
-	} else {
-		policy = SCHED_NORMAL;
-		sp.sched_priority = 0;
-	}
-	sched_setscheduler_nocheck(t, policy, &sp);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline. We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh. This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
+static void rcu_cpu_kthread_park(unsigned int cpu)
 {
-	while (cpu_is_offline(cpu) ||
-	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-	       smp_processor_id() != cpu) {
-		if (kthread_should_stop())
-			return 1;
-		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-		local_bh_enable();
-		schedule_timeout_uninterruptible(1);
-		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
-		local_bh_disable();
-	}
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	return 0;
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+	return __get_cpu_var(rcu_cpu_has_work);
 }
 
 /*
@@ -1378,96 +1323,35 @@ static int rcu_cpu_kthread_should_stop(int cpu)
  * RCU softirq used in flavors and configurations of RCU that do not
  * support RCU priority boosting.
  */
-static int rcu_cpu_kthread(void *arg)
+static void rcu_cpu_kthread(unsigned int cpu)
 {
-	int cpu = (int)(long)arg;
-	unsigned long flags;
-	int spincnt = 0;
-	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	char work;
-	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	int spincnt;
 
-	trace_rcu_utilization("Start CPU kthread@init");
-	for (;;) {
-		*statusp = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End CPU kthread@rcu_wait");
-		rcu_wait(*workp != 0 || kthread_should_stop());
+	for (spincnt = 0; spincnt < 10; spincnt++) {
 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
 		local_bh_disable();
-		if (rcu_cpu_kthread_should_stop(cpu)) {
-			local_bh_enable();
-			break;
-		}
 		*statusp = RCU_KTHREAD_RUNNING;
-		per_cpu(rcu_cpu_kthread_loops, cpu)++;
-		local_irq_save(flags);
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
 		work = *workp;
 		*workp = 0;
-		local_irq_restore(flags);
+		local_irq_enable();
 		if (work)
 			rcu_kthread_do_work();
 		local_bh_enable();
-		if (*workp != 0)
-			spincnt++;
-		else
-			spincnt = 0;
-		if (spincnt > 10) {
-			*statusp = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End CPU kthread@rcu_yield");
-			schedule_timeout_interruptible(2);
-			trace_rcu_utilization("Start CPU kthread@rcu_yield");
-			spincnt = 0;
+		if (*workp == 0) {
+			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
 		}
 	}
-	*statusp = RCU_KTHREAD_STOPPED;
-	trace_rcu_utilization("End CPU kthread@term");
-	return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task. There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online. We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods. So we bind the per-CPU kthread to its CPU only if the CPU
- * is online. If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-		return 0;
-	t = kthread_create_on_node(rcu_cpu_kthread,
-				   (void *)(long)cpu,
-				   cpu_to_node(cpu),
-				   "rcuc/%d", cpu);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-	if (cpu_online(cpu))
-		kthread_bind(t, cpu);
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-	return 0;
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	*statusp = RCU_KTHREAD_WAITING;
 }
 
 /*
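[Context, not part of the patch: with smpboot in charge, rcu_cpu_kthread() above no longer needs its own infinite loop, its affinity-repair code, or a stop path. The smpboot core owns the outer loop and calls the registered callbacks; a much-simplified sketch of that dispatch loop follows. The real code lives in kernel/smpboot.c and also handles preemption, status tracking, and cleanup; the *_sketch names are illustrative only.]

/* Simplified, illustrative sketch of the smpboot dispatch loop; this is
 * not the actual kernel/smpboot.c implementation. */
struct smpboot_thread_data_sketch {
	unsigned int			cpu;
	struct smp_hotplug_thread	*ht;
};

static int smpboot_thread_fn_sketch(void *data)
{
	struct smpboot_thread_data_sketch *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			if (ht->park)
				ht->park(td->cpu);	/* e.g. rcu_cpu_kthread_park() */
			kthread_parkme();		/* sleep until the CPU comes back */
			continue;
		}
		if (!ht->thread_should_run(td->cpu)) {	/* e.g. rcu_cpu_kthread_should_run() */
			schedule();			/* no pending work: sleep */
		} else {
			__set_current_state(TASK_RUNNING);
			ht->thread_fn(td->cpu);		/* e.g. rcu_cpu_kthread() */
		}
	}
}

This is why the new rcu_cpu_kthread() does a bounded amount of work and then returns: returning hands control back to the dispatch loop, which sleeps when thread_should_run() reports no work and parks the thread across CPU-hotplug operations.
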
@@ -1503,6 +1387,15 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	free_cpumask_var(cm);
 }
 
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
+
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1512,11 +1405,9 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 
 	rcu_scheduler_fully_active = 1;
-	for_each_possible_cpu(cpu) {
+	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu))
-			(void)rcu_spawn_one_cpu_kthread(cpu);
-	}
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rnp = rcu_get_root(rcu_state);
 	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	if (NUM_RCU_NODES > 1) {
@@ -1533,10 +1424,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_scheduler_fully_active) {
-		(void)rcu_spawn_one_cpu_kthread(cpu);
+	if (rcu_scheduler_fully_active)
 		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
-	}
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1560,22 +1449,10 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_cpu_kthread(int cpu)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-}
-
 static int __init rcu_scheduler_really_started(void)
 {
 	rcu_scheduler_fully_active = 1;
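[Context, not part of the patch: the CONFIG_HOTPLUG_CPU stubs and rcu_cpu_kthread_setrt() need no replacement because CPU-hotplug handling now lives in the generic smpboot code. The "rcuc/%u" task is parked rather than killed when its CPU goes down, so the SCHED_FIFO priority set once in rcu_cpu_kthread_setup() survives an offline/online cycle. The sketch below is illustrative; the hypothetical_* wrappers stand in for the real callers in kernel/cpu.c, while smpboot_park_threads()/smpboot_unpark_threads() are kernel-internal helpers declared in kernel/smpboot.h.]

/* Illustrative only: roughly how generic hotplug code drives the registered
 * per-CPU threads.  The hypothetical_* wrappers are not real kernel functions. */
static int hypothetical_cpu_down_path(unsigned int cpu)
{
	smpboot_park_threads(cpu);	/* invokes each registered thread's .park callback */
	/* ... actually take the CPU offline ... */
	return 0;
}

static int hypothetical_cpu_up_path(unsigned int cpu)
{
	/* ... bring the CPU back online ... */
	smpboot_unpark_threads(cpu);	/* re-binds the parked threads and wakes them */
	return 0;
}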