@@ -1041,6 +1041,8 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
 	 * robustness against future implementation changes.
 	 */
 	smp_mb(); /* See above comment block. */
+	if (cpumask_first(cpu_online_mask) == smp_processor_id())
+		atomic_inc(&synchronize_sched_expedited_count);
 	return 0;
 }
 
@@ -1053,13 +1055,26 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  * Note that it is illegal to call this function while holding any
  * lock that is acquired by a CPU-hotplug notifier. Failing to
  * observe this restriction will result in deadlock.
+ *
+ * The synchronize_sched_expedited_cpu_stop() function is called
+ * in stop-CPU context, but in order to keep overhead down to a dull
+ * roar, we don't force this function to wait for its counterparts
+ * on other CPUs. One instance of this function will increment the
+ * synchronize_sched_expedited_count variable per call to
+ * try_stop_cpus(), but there is no guarantee what order this instance
+ * will occur in. The worst case is that it is last on one call
+ * to try_stop_cpus(), and the first on the next call. This means
+ * that piggybacking requires that synchronize_sched_expedited_count
+ * be incremented by 3: this guarantees that the piggybacking
+ * task has waited through an entire cycle of context switches,
+ * even in the worst case.
  */
 void synchronize_sched_expedited(void)
 {
 	int snap, trycount = 0;
 
 	smp_mb(); /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+	snap = atomic_read(&synchronize_sched_expedited_count) + 2;
 	get_online_cpus();
 	while (try_stop_cpus(cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
@@ -1077,7 +1092,6 @@ void synchronize_sched_expedited(void)
 		}
 		get_online_cpus();
 	}
-	atomic_inc(&synchronize_sched_expedited_count);
 	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
 	put_online_cpus();
 }
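
For anyone who wants to convince themselves of the counting rule spelled out
in the new comment block, here is a small standalone C11 sketch of the idea.
It is my own illustration, not kernel code: the names expedited_count,
take_snapshot(), and gp_already_elapsed() are invented for this example, and
ordinary sequentially consistent atomics stand in for atomic_t and smp_mb().
It models why a piggybacking caller snapshots the counter at count + 2 and may
return only once the counter has moved strictly past that snapshot, which in
the worst case takes three increments.

	#include <stdatomic.h>
	#include <stdio.h>

	/* One increment lands per try_stop_cpus() cycle (done by the first
	 * online CPU's stop handler in the patch above). */
	static atomic_int expedited_count;

	/* Snapshot taken before attempting try_stop_cpus(): count + 2. */
	static int take_snapshot(void)
	{
		return atomic_load(&expedited_count) + 2;
	}

	/* A waiter may piggyback on someone else's expedited grace period
	 * only once the counter has moved strictly past its snapshot.
	 * Because an increment may land last in one try_stop_cpus() cycle
	 * and first in the next, only the third increment proves that a
	 * full cycle completed after the snapshot was taken. */
	static int gp_already_elapsed(int snap)
	{
		return atomic_load(&expedited_count) - snap > 0;
	}

	int main(void)
	{
		int snap = take_snapshot();	/* count == 0, so snap == 2 */

		atomic_fetch_add(&expedited_count, 1);	/* tail of an earlier cycle */
		printf("after 1 increment: piggyback? %d\n", gp_already_elapsed(snap));
		atomic_fetch_add(&expedited_count, 1);	/* start of the next cycle */
		printf("after 2 increments: piggyback? %d\n", gp_already_elapsed(snap));
		atomic_fetch_add(&expedited_count, 1);	/* that cycle has completed */
		printf("after 3 increments: piggyback? %d\n", gp_already_elapsed(snap));
		return 0;
	}

The subtraction-against-snapshot form used in gp_already_elapsed() is the
usual way to compare counters that may wrap: the waiter treats the expedited
grace period as already elapsed once the signed difference goes positive,
which matches taking the snapshot at count + 2 in the patched
synchronize_sched_expedited().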