@@ -7024,6 +7024,11 @@ fail:
 	return ret;
 }
 
+#define RCU_MIGRATION_IDLE	0
+#define RCU_MIGRATION_NEED_QS	1
+#define RCU_MIGRATION_GOT_QS	2
+#define RCU_MIGRATION_MUST_SYNC	3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7031,6 +7036,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+	int badcpu;
 	int cpu = (long)data;
 	struct rq *rq;
 
@@ -7065,8 +7071,17 @@ static int migration_thread(void *data)
 		req = list_entry(head->next, struct migration_req, list);
 		list_del_init(head->next);
 
-		spin_unlock(&rq->lock);
-		__migrate_task(req->task, cpu, req->dest_cpu);
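+		/*
+		 * A request with a NULL ->task is not a task-migration
+		 * request at all: it is a quiescent-state probe posted by
+		 * synchronize_sched_expedited().  For such requests, reuse
+		 * ->dest_cpu to report the RCU_MIGRATION_* outcome.
+		 */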
+		if (req->task != NULL) {
+			spin_unlock(&rq->lock);
+			__migrate_task(req->task, cpu, req->dest_cpu);
+		} else if (likely(cpu == (badcpu = smp_processor_id()))) {
+			req->dest_cpu = RCU_MIGRATION_GOT_QS;
+			spin_unlock(&rq->lock);
+		} else {
+			req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+			spin_unlock(&rq->lock);
+			WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+		}
 		local_irq_enable();
 
 		complete(&req->done);
@@ -10554,3 +10569,113 @@ struct cgroup_subsys cpuacct_subsys = {
 	.subsys_id = cpuacct_subsys_id,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
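+/*
+ * Per-CPU quiescent-state probe requests (struct migration_req is reused
+ * for this purpose) and a mutex serializing concurrent callers of
+ * synchronize_sched_expedited().
+ */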
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST	-2
+#define RCU_EXPEDITED_STATE_IDLE	-1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
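+/*
+ * Report the state of the current expedited grace period, along with each
+ * online CPU's probe state, for use by rcutorture.
+ */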
+int rcu_expedited_torture_stats(char *page)
+{
+	int cnt = 0;
+	int cpu;
+
+	cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+	for_each_online_cpu(cpu) {
+		cnt += sprintf(&page[cnt], " %d:%d",
+			       cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+	}
+	cnt += sprintf(&page[cnt], "\n");
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
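+ *
+ * Typical usage mirrors synchronize_sched(); for example, a hypothetical
+ * caller unlinking an element that readers traverse under
+ * rcu_read_lock_sched() might do:
+ *
+ *	list_del_rcu(&p->list);
+ *	synchronize_sched_expedited();
+ *	kfree(p);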
+ */
+void synchronize_sched_expedited(void)
+{
+	int cpu;
+	unsigned long flags;
+	bool need_full_sync = 0;
+	struct rq *rq;
+	struct migration_req *req;
+	long snap;
+	int trycount = 0;
+
+	smp_mb();  /* ensure prior mod happens before capturing snap. */
+	snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+	get_online_cpus();
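+	/*
+	 * Each pass through the following loop attempts to acquire
+	 * rcu_sched_expedited_mutex, dropping the CPU-hotplug lock and
+	 * backing off before each retry.  Fall back to synchronize_sched()
+	 * after ten failed attempts, and return early if the counter
+	 * indicates that some other CPU's expedited grace period has
+	 * elapsed in the meantime, since that covers this caller as well.
+	 */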
+	while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+		put_online_cpus();
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+		if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+		get_online_cpus();
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
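+	/*
+	 * Post a NULL-task migration request to each online CPU's
+	 * migration thread.  Servicing the request forces that CPU to
+	 * context-switch to its high-priority migration thread, which
+	 * constitutes an rcu-sched quiescent state.
+	 */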
+	for_each_online_cpu(cpu) {
+		rq = cpu_rq(cpu);
+		req = &per_cpu(rcu_migration_req, cpu);
+		init_completion(&req->done);
+		req->task = NULL;
+		req->dest_cpu = RCU_MIGRATION_NEED_QS;
+		spin_lock_irqsave(&rq->lock, flags);
+		list_add(&req->list, &rq->migration_queue);
+		spin_unlock_irqrestore(&rq->lock, flags);
+		wake_up_process(rq->migration_thread);
+	}
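+	/*
+	 * Wait for each probe to be serviced.  If any probe was handled
+	 * on the wrong CPU, fall back to synchronize_sched() below so that
+	 * the grace-period guarantee still holds.
+	 */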
+	for_each_online_cpu(cpu) {
+		rcu_expedited_state = cpu;
+		req = &per_cpu(rcu_migration_req, cpu);
+		rq = cpu_rq(cpu);
+		wait_for_completion(&req->done);
+		spin_lock_irqsave(&rq->lock, flags);
+		if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+			need_full_sync = 1;
+		req->dest_cpu = RCU_MIGRATION_IDLE;
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
+	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+	mutex_unlock(&rcu_sched_expedited_mutex);
+	put_online_cpus();
+	if (need_full_sync)
+		synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */