@@ -45,6 +45,12 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 
+enum rcu_barrier {
+	RCU_BARRIER_STD,
+	RCU_BARRIER_BH,
+	RCU_BARRIER_SCHED,
+};
+
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -83,19 +89,30 @@ static void rcu_barrier_callback(struct rcu_head *notused)
 /*
  * Called with preemption disabled, and from cross-cpu IRQ context.
  */
-static void rcu_barrier_func(void *notused)
+static void rcu_barrier_func(void *type)
 {
 	int cpu = smp_processor_id();
 	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
 
 	atomic_inc(&rcu_barrier_cpu_count);
-	call_rcu(head, rcu_barrier_callback);
+	switch ((enum rcu_barrier)type) {
+	case RCU_BARRIER_STD:
+		call_rcu(head, rcu_barrier_callback);
+		break;
+	case RCU_BARRIER_BH:
+		call_rcu_bh(head, rcu_barrier_callback);
+		break;
+	case RCU_BARRIER_SCHED:
+		call_rcu_sched(head, rcu_barrier_callback);
+		break;
+	}
 }
 
-/**
- * rcu_barrier - Wait until all the in-flight RCUs are complete.
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
  */
-void rcu_barrier(void)
+static void _rcu_barrier(enum rcu_barrier type)
 {
 	BUG_ON(in_interrupt());
 	/* Take cpucontrol mutex to protect against CPU hotplug */
@@ -111,13 +128,39 @@ void rcu_barrier(void)
 	 * until all the callbacks are queued.
 	 */
 	rcu_read_lock();
-	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	on_each_cpu(rcu_barrier_func, (void *)type, 0, 1);
 	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(RCU_BARRIER_STD);
+}
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(RCU_BARRIER_BH);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(RCU_BARRIER_SCHED);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
 void __init rcu_init(void)
 {
 	__rcu_init();
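
For illustration only, not part of the patch: a minimal sketch of how a caller might pair the new rcu_barrier_bh() with call_rcu_bh() in a module exit path, so that no deferred callback can run after the module text is unloaded. The my_entry/my_exit names are hypothetical.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	struct rcu_head rcu;
	int key;
};

static void my_free_entry(struct rcu_head *head)
{
	kfree(container_of(head, struct my_entry, rcu));
}

static void my_remove(struct my_entry *e)
{
	/* Defer the free until a BH-flavor grace period has elapsed. */
	call_rcu_bh(&e->rcu, my_free_entry);
}

static void __exit my_exit(void)
{
	/* Wait for every callback queued via call_rcu_bh() to be invoked
	 * before the module (and my_free_entry with it) goes away. */
	rcu_barrier_bh();
}
module_exit(my_exit);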