|
@@ -114,3 +114,163 @@ int rcu_my_thread_group_empty(void)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
|
|
|
#endif /* #ifdef CONFIG_PROVE_RCU */
|
|
|
+
|
|
|
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
|
|
|
+static inline void debug_init_rcu_head(struct rcu_head *head)
|
|
|
+{
|
|
|
+ debug_object_init(head, &rcuhead_debug_descr);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void debug_rcu_head_free(struct rcu_head *head)
|
|
|
+{
|
|
|
+ debug_object_free(head, &rcuhead_debug_descr);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * fixup_init is called when:
|
|
|
+ * - an active object is initialized
|
|
|
+ */
|
|
|
+static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
|
|
|
+{
|
|
|
+ struct rcu_head *head = addr;
|
|
|
+
|
|
|
+ switch (state) {
|
|
|
+ case ODEBUG_STATE_ACTIVE:
|
|
|
+ /*
|
|
|
+ * Ensure that queued callbacks are all executed.
|
|
|
+ * If we detect that we are nested in an RCU read-side critical
|
|
|
+ * section, we should simply fail, otherwise we would deadlock.
|
|
|
+ */
|
|
|
+ if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
|
|
|
+ irqs_disabled()) {
|
|
|
+ WARN_ON(1);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ rcu_barrier();
|
|
|
+ rcu_barrier_sched();
|
|
|
+ rcu_barrier_bh();
|
|
|
+ debug_object_init(head, &rcuhead_debug_descr);
|
|
|
+ return 1;
|
|
|
+ default:
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * fixup_activate is called when:
|
|
|
+ * - an active object is activated
|
|
|
+ * - an unknown object is activated (might be a statically initialized object)
|
|
|
+ * Activation is performed internally by call_rcu().
|
|
|
+ */
|
|
|
+static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
|
|
|
+{
|
|
|
+ struct rcu_head *head = addr;
|
|
|
+
|
|
|
+ switch (state) {
|
|
|
+
|
|
|
+ case ODEBUG_STATE_NOTAVAILABLE:
|
|
|
+ /*
|
|
|
+ * This is not really a fixup. We just make sure that it is
|
|
|
+ * tracked in the object tracker.
|
|
|
+ */
|
|
|
+ debug_object_init(head, &rcuhead_debug_descr);
|
|
|
+ debug_object_activate(head, &rcuhead_debug_descr);
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ case ODEBUG_STATE_ACTIVE:
|
|
|
+ /*
|
|
|
+ * Ensure that queued callbacks are all executed.
|
|
|
+ * If we detect that we are nested in an RCU read-side critical
|
|
|
+ * section, we should simply fail, otherwise we would deadlock.
|
|
|
+ */
|
|
|
+ if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
|
|
|
+ irqs_disabled()) {
|
|
|
+ WARN_ON(1);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ rcu_barrier();
|
|
|
+ rcu_barrier_sched();
|
|
|
+ rcu_barrier_bh();
|
|
|
+ debug_object_activate(head, &rcuhead_debug_descr);
|
|
|
+ return 1;
|
|
|
+ default:
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * fixup_free is called when:
|
|
|
+ * - an active object is freed
|
|
|
+ */
|
|
|
+static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
|
|
|
+{
|
|
|
+ struct rcu_head *head = addr;
|
|
|
+
|
|
|
+ switch (state) {
|
|
|
+ case ODEBUG_STATE_ACTIVE:
|
|
|
+ /*
|
|
|
+ * Ensure that queued callbacks are all executed.
|
|
|
+ * If we detect that we are nested in an RCU read-side critical
|
|
|
+ * section, we should simply fail, otherwise we would deadlock.
|
|
|
+ */
|
|
|
+#ifndef CONFIG_PREEMPT
|
|
|
+ WARN_ON(1);
|
|
|
+ return 0;
|
|
|
+#else
|
|
|
+ if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
|
|
|
+ irqs_disabled()) {
|
|
|
+ WARN_ON(1);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ rcu_barrier();
|
|
|
+ rcu_barrier_sched();
|
|
|
+ rcu_barrier_bh();
|
|
|
+ debug_object_free(head, &rcuhead_debug_descr);
|
|
|
+ return 1;
|
|
|
+#endif
|
|
|
+ default:
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
|
|
|
+ * @head: pointer to rcu_head structure to be initialized
|
|
|
+ *
|
|
|
+ * This function informs debugobjects of a new rcu_head structure that
|
|
|
+ * has been allocated as an auto variable on the stack. This function
|
|
|
+ * is not required for rcu_head structures that are statically defined or
|
|
|
+ * that are dynamically allocated on the heap. This function has no
|
|
|
+ * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
|
|
|
+ */
|
|
|
+void init_rcu_head_on_stack(struct rcu_head *head)
|
|
|
+{
|
|
|
+ debug_object_init_on_stack(head, &rcuhead_debug_descr);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
|
|
|
+
|
|
|
+/**
|
|
|
+ * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
|
|
|
+ * @head: pointer to rcu_head structure that is about to be destroyed
|
|
|
+ *
|
|
|
+ * This function informs debugobjects that an on-stack rcu_head structure
|
|
|
+ * is about to go out of scope. As with init_rcu_head_on_stack(), this
|
|
|
+ * function is not required for rcu_head structures that are statically
|
|
|
+ * defined or that are dynamically allocated on the heap. Also as with
|
|
|
+ * init_rcu_head_on_stack(), this function has no effect for
|
|
|
+ * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
|
|
|
+ */
|
|
|
+void destroy_rcu_head_on_stack(struct rcu_head *head)
|
|
|
+{
|
|
|
+ debug_object_free(head, &rcuhead_debug_descr);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
|
|
|
+
|
|
|
+struct debug_obj_descr rcuhead_debug_descr = {
|
|
|
+ .name = "rcu_head",
|
|
|
+ .fixup_init = rcuhead_fixup_init,
|
|
|
+ .fixup_activate = rcuhead_fixup_activate,
|
|
|
+ .fixup_free = rcuhead_fixup_free,
|
|
|
+};
|
|
|
+EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
|
|
|
+#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
|