|
@@ -66,11 +66,73 @@ static inline void rcutorture_record_progress(unsigned long vernum)
|
|
|
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
|
|
|
|
|
|
/* Exported common interfaces */
|
|
|
+
|
|
|
+#ifdef CONFIG_PREEMPT_RCU
|
|
|
+
|
|
|
+/**
|
|
|
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
|
|
|
+ * @head: structure to be used for queueing the RCU updates.
|
|
|
+ * @func: actual callback function to be invoked after the grace period
|
|
|
+ *
|
|
|
+ * The callback function will be invoked some time after a full grace
|
|
|
+ * period elapses, in other words after all pre-existing RCU read-side
|
|
|
+ * critical sections have completed. However, the callback function
|
|
|
+ * might well execute concurrently with RCU read-side critical sections
|
|
|
+ * that started after call_rcu() was invoked. RCU read-side critical
|
|
|
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
|
|
|
+ * and may be nested.
|
|
|
+ */
|
|
|
+extern void call_rcu(struct rcu_head *head,
|
|
|
+ void (*func)(struct rcu_head *head));
|
|
|
+
|
|
|
+#else /* #ifdef CONFIG_PREEMPT_RCU */
|
|
|
+
|
|
|
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
|
|
|
+#define call_rcu call_rcu_sched
|
|
|
+
|
|
|
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
|
|
|
+
|
|
|
+/**
|
|
|
+ * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
|
|
|
+ * @head: structure to be used for queueing the RCU updates.
|
|
|
+ * @func: actual callback function to be invoked after the grace period
|
|
|
+ *
|
|
|
+ * The callback function will be invoked some time after a full grace
|
|
|
+ * period elapses, in other words after all currently executing RCU
|
|
|
+ * read-side critical sections have completed. call_rcu_bh() assumes
|
|
|
+ * that the read-side critical sections end on completion of a softirq
|
|
|
+ * handler. This means that read-side critical sections in process
|
|
|
+ * context must not be interrupted by softirqs. This interface is to be
|
|
|
+ * used when most of the read-side critical sections are in softirq context.
|
|
|
+ * RCU read-side critical sections are delimited by:
|
|
|
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
|
|
|
+ * OR
|
|
|
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
|
|
|
+ * These may be nested.
|
|
|
+ */
|
|
|
+extern void call_rcu_bh(struct rcu_head *head,
|
|
|
+ void (*func)(struct rcu_head *head));
|
|
|
+
|
|
|
+/**
|
|
|
+ * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
|
|
|
+ * @head: structure to be used for queueing the RCU updates.
|
|
|
+ * @func: actual callback function to be invoked after the grace period
|
|
|
+ *
|
|
|
+ * The callback function will be invoked some time after a full grace
|
|
|
+ * period elapses, in other words after all currently executing RCU
|
|
|
+ * read-side critical sections have completed. call_rcu_sched() assumes
|
|
|
+ * that the read-side critical sections end on enabling of preemption
|
|
|
+ * or on voluntary preemption.
|
|
|
+ * RCU read-side critical sections are delimited by:
|
|
|
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(),
|
|
|
+ * OR
|
|
|
+ * anything that disables preemption.
|
|
|
+ * These may be nested.
|
|
|
+ */
|
|
|
extern void call_rcu_sched(struct rcu_head *head,
|
|
|
void (*func)(struct rcu_head *rcu));
|
|
|
+
|
|
|
extern void synchronize_sched(void);
|
|
|
-extern void rcu_barrier_bh(void);
|
|
|
-extern void rcu_barrier_sched(void);
|
|
|
|
|
|
static inline void __rcu_read_lock_bh(void)
|
|
|
{
|
|
@@ -143,6 +205,15 @@ static inline void rcu_exit_nohz(void)
|
|
|
|
|
|
#endif /* #else #ifdef CONFIG_NO_HZ */
|
|
|
|
|
|
+/*
|
|
|
+ * Infrastructure to implement the synchronize_() primitives in
|
|
|
+ * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
|
|
|
+ */
|
|
|
+
|
|
|
+typedef void call_rcu_func_t(struct rcu_head *head,
|
|
|
+ void (*func)(struct rcu_head *head));
|
|
|
+void wait_rcu_gp(call_rcu_func_t crf);
|
|
|
+
|
|
|
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
|
|
|
#include <linux/rcutree.h>
|
|
|
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
|
|
@@ -723,61 +794,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
|
|
|
#define RCU_INIT_POINTER(p, v) \
|
|
|
p = (typeof(*v) __force __rcu *)(v)
|
|
|
|
|
|
-/* Infrastructure to implement the synchronize_() primitives. */
|
|
|
-
|
|
|
-struct rcu_synchronize {
|
|
|
- struct rcu_head head;
|
|
|
- struct completion completion;
|
|
|
-};
|
|
|
-
|
|
|
-extern void wakeme_after_rcu(struct rcu_head *head);
|
|
|
-
|
|
|
-#ifdef CONFIG_PREEMPT_RCU
|
|
|
-
|
|
|
-/**
|
|
|
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
|
|
|
- * @head: structure to be used for queueing the RCU updates.
|
|
|
- * @func: actual callback function to be invoked after the grace period
|
|
|
- *
|
|
|
- * The callback function will be invoked some time after a full grace
|
|
|
- * period elapses, in other words after all pre-existing RCU read-side
|
|
|
- * critical sections have completed. However, the callback function
|
|
|
- * might well execute concurrently with RCU read-side critical sections
|
|
|
- * that started after call_rcu() was invoked. RCU read-side critical
|
|
|
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
|
|
|
- * and may be nested.
|
|
|
- */
|
|
|
-extern void call_rcu(struct rcu_head *head,
|
|
|
- void (*func)(struct rcu_head *head));
|
|
|
-
|
|
|
-#else /* #ifdef CONFIG_PREEMPT_RCU */
|
|
|
-
|
|
|
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
|
|
|
-#define call_rcu call_rcu_sched
|
|
|
-
|
|
|
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
|
|
|
-
|
|
|
-/**
|
|
|
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
|
|
|
- * @head: structure to be used for queueing the RCU updates.
|
|
|
- * @func: actual callback function to be invoked after the grace period
|
|
|
- *
|
|
|
- * The callback function will be invoked some time after a full grace
|
|
|
- * period elapses, in other words after all currently executing RCU
|
|
|
- * read-side critical sections have completed. call_rcu_bh() assumes
|
|
|
- * that the read-side critical sections end on completion of a softirq
|
|
|
- * handler. This means that read-side critical sections in process
|
|
|
- * context must not be interrupted by softirqs. This interface is to be
|
|
|
- * used when most of the read-side critical sections are in softirq context.
|
|
|
- * RCU read-side critical sections are delimited by :
|
|
|
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
|
|
|
- * OR
|
|
|
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
|
|
|
- * These may be nested.
|
|
|
- */
|
|
|
-extern void call_rcu_bh(struct rcu_head *head,
|
|
|
- void (*func)(struct rcu_head *head));
|
|
|
-
|
|
|
/*
|
|
|
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
|
|
|
* by call_rcu() and rcu callback execution, and are therefore not part of the
|