@@ -22,6 +22,40 @@
  * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#ifdef CONFIG_RCU_TRACE
+#define RCU_TRACE(stmt) stmt
+#else /* #ifdef CONFIG_RCU_TRACE */
+#define RCU_TRACE(stmt)
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+/* Global control variables for rcupdate callback mechanism. */
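+/* The ->rcucblist queue is singly linked: callbacks before *->donetail */
+/* have passed a grace period and are ready to invoke, while those */
+/* between *->donetail and *->curtail are still waiting for one. */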
+struct rcu_ctrlblk {
+	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
+	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
+	struct rcu_head **curtail;	/* ->next pointer of last CB. */
+	RCU_TRACE(long qlen);		/* Number of pending CBs. */
+};
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_sched_ctrlblk = {
+	.donetail	= &rcu_sched_ctrlblk.rcucblist,
+	.curtail	= &rcu_sched_ctrlblk.rcucblist,
+};
+
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+	.donetail	= &rcu_bh_ctrlblk.rcucblist,
+	.curtail	= &rcu_bh_ctrlblk.rcucblist,
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
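+/* Lockdep-based checking forgives RCU misuse until the scheduler */
+/* starts; see rcu_scheduler_starting() later in this file. */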
+int rcu_scheduler_active __read_mostly;
+EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 #ifdef CONFIG_TINY_PREEMPT_RCU
 
 #include <linux/delay.h>
@@ -46,17 +80,45 @@ struct rcu_preempt_ctrlblk {
 	struct list_head *gp_tasks;
 				/* Pointer to the first task blocking the */
 				/* current grace period, or NULL if there */
-				/* is not such task. */
+				/* is no such task. */
 	struct list_head *exp_tasks;
 				/* Pointer to first task blocking the */
 				/* current expedited grace period, or NULL */
 				/* if there is no such task.  If there */
 				/* is no current expedited grace period, */
 				/* then there cannot be any such task. */
+#ifdef CONFIG_RCU_BOOST
+	struct list_head *boost_tasks;
+				/* Pointer to first task that needs to be */
+				/* priority-boosted, or NULL if no priority */
+				/* boosting is needed.  If there is no */
+				/* current or expedited grace period, there */
+				/* can be no such task. */
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	u8 gpnum;		/* Current grace period. */
 	u8 gpcpu;		/* Last grace period blocked by the CPU. */
 	u8 completed;		/* Last grace period completed. */
 				/* If all three are equal, RCU is idle. */
+#ifdef CONFIG_RCU_BOOST
+	s8 boosted_this_gp;	/* Has boosting already happened? */
+	unsigned long boost_time; /* When to start boosting (jiffies) */
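+				/* ->boosted_this_gp values: -1 = expedited */
+				/* boost in progress, 0 = not yet boosted, */
+				/* 1 = boosting begun, 2 = boosting done */
+				/* (decoded by show_tiny_preempt_stats()). */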
+#endif /* #ifdef CONFIG_RCU_BOOST */
+#ifdef CONFIG_RCU_TRACE
+	unsigned long n_grace_periods;
+#ifdef CONFIG_RCU_BOOST
+	unsigned long n_tasks_boosted;
+	unsigned long n_exp_boosts;
+	unsigned long n_normal_boosts;
+	unsigned long n_normal_balk_blkd_tasks;
+	unsigned long n_normal_balk_gp_tasks;
+	unsigned long n_normal_balk_boost_tasks;
+	unsigned long n_normal_balk_boosted;
+	unsigned long n_normal_balk_notyet;
+	unsigned long n_normal_balk_nos;
+	unsigned long n_exp_balk_blkd_tasks;
+	unsigned long n_exp_balk_nos;
+#endif /* #ifdef CONFIG_RCU_BOOST */
+#endif /* #ifdef CONFIG_RCU_TRACE */
 };
 
 static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
@@ -121,6 +183,210 @@ static int rcu_preempt_gp_in_progress(void)
 	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
 }
 
+/*
+ * Advance a ->blkd_tasks-list pointer to the next entry, returning
+ * NULL instead if at the end of the list.
+ */
+static struct list_head *rcu_next_node_entry(struct task_struct *t)
+{
+	struct list_head *np;
+
+	np = t->rcu_node_entry.next;
+	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
+		np = NULL;
+	return np;
+}
+
+#ifdef CONFIG_RCU_TRACE
+
+#ifdef CONFIG_RCU_BOOST
+static void rcu_initiate_boost_trace(void);
+static void rcu_initiate_exp_boost_trace(void);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Dump additional statistics for TINY_PREEMPT_RCU.
+ */
+static void show_tiny_preempt_stats(struct seq_file *m)
+{
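+	/* In the tasks=%c%c%c field, "X."[cond] prints 'X' when cond is */
+	/* zero and '.' otherwise, so 'T' flags blocked tasks and 'N'/'E' */
+	/* flag tasks blocking the normal/expedited grace period. */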
+	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
+		   rcu_preempt_ctrlblk.rcb.qlen,
+		   rcu_preempt_ctrlblk.n_grace_periods,
+		   rcu_preempt_ctrlblk.gpnum,
+		   rcu_preempt_ctrlblk.gpcpu,
+		   rcu_preempt_ctrlblk.completed,
+		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
+		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
+		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
+#ifdef CONFIG_RCU_BOOST
+	seq_printf(m, "             ttb=%c btg=",
+		   "B."[!rcu_preempt_ctrlblk.boost_tasks]);
+	switch (rcu_preempt_ctrlblk.boosted_this_gp) {
+	case -1:
+		seq_puts(m, "exp");
+		break;
+	case 0:
+		seq_puts(m, "no");
+		break;
+	case 1:
+		seq_puts(m, "begun");
+		break;
+	case 2:
+		seq_puts(m, "done");
+		break;
+	default:
+		seq_printf(m, "?%d?", rcu_preempt_ctrlblk.boosted_this_gp);
+	}
+	seq_printf(m, " ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
+		   rcu_preempt_ctrlblk.n_tasks_boosted,
+		   rcu_preempt_ctrlblk.n_exp_boosts,
+		   rcu_preempt_ctrlblk.n_normal_boosts,
+		   (int)(jiffies & 0xffff),
+		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
+	seq_printf(m, "             %s: nt=%lu gt=%lu bt=%lu b=%lu ny=%lu nos=%lu\n",
+		   "normal balk",
+		   rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks,
+		   rcu_preempt_ctrlblk.n_normal_balk_gp_tasks,
+		   rcu_preempt_ctrlblk.n_normal_balk_boost_tasks,
+		   rcu_preempt_ctrlblk.n_normal_balk_boosted,
+		   rcu_preempt_ctrlblk.n_normal_balk_notyet,
+		   rcu_preempt_ctrlblk.n_normal_balk_nos);
+	seq_printf(m, "             exp balk: bt=%lu nos=%lu\n",
+		   rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks,
+		   rcu_preempt_ctrlblk.n_exp_balk_nos);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+#ifdef CONFIG_RCU_BOOST
+
+#include "rtmutex_common.h"
+
+/*
+ * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
+ * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
+ */
+static int rcu_boost(void)
+{
+	unsigned long flags;
+	struct rt_mutex mtx;
+	struct list_head *np;
+	struct task_struct *t;
+
+	if (rcu_preempt_ctrlblk.boost_tasks == NULL)
+		return 0;  /* Nothing to boost. */
+	raw_local_irq_save(flags);
+	rcu_preempt_ctrlblk.boosted_this_gp++;
+	t = container_of(rcu_preempt_ctrlblk.boost_tasks, struct task_struct,
+			 rcu_node_entry);
+	np = rcu_next_node_entry(t);
+	rt_mutex_init_proxy_locked(&mtx, t);
+	t->rcu_boost_mutex = &mtx;
+	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+	raw_local_irq_restore(flags);
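+	/* Task t now owns mtx by proxy, so blocking on mtx below lends */
+	/* this kthread's RT priority to t until t releases the mutex at */
+	/* the end of its RCU read-side critical section. */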
+	rt_mutex_lock(&mtx);
+	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);
+	rcu_preempt_ctrlblk.boosted_this_gp++;
+	rt_mutex_unlock(&mtx);
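+	/* Tell the caller whether further candidates remain to be boosted. */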
+	return rcu_preempt_ctrlblk.boost_tasks != NULL;
+}
+
+/*
+ * Check to see if it is now time to start boosting RCU readers blocking
+ * the current grace period, and, if so, tell the rcu_kthread_task to
+ * start boosting them.  If there is an expedited boost in progress,
+ * normal boosting is deferred until it completes.
+ *
+ * If there are no blocked readers blocking the current grace period,
+ * return 0 to let the caller know, otherwise return 1.  Note that this
+ * return value is independent of whether or not boosting was done.
+ */
+static int rcu_initiate_boost(void)
+{
+	if (!rcu_preempt_blocked_readers_cgp()) {
+		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks++);
+		return 0;
+	}
+	if (rcu_preempt_ctrlblk.gp_tasks != NULL &&
+	    rcu_preempt_ctrlblk.boost_tasks == NULL &&
+	    rcu_preempt_ctrlblk.boosted_this_gp == 0 &&
+	    ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) {
+		rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks;
+		invoke_rcu_kthread();
+		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
+	} else
+		RCU_TRACE(rcu_initiate_boost_trace());
+	return 1;
+}
+
+/*
+ * Initiate boosting for an expedited grace period.
+ */
+static void rcu_initiate_expedited_boost(void)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	if (!list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) {
+		rcu_preempt_ctrlblk.boost_tasks =
+			rcu_preempt_ctrlblk.blkd_tasks.next;
+		rcu_preempt_ctrlblk.boosted_this_gp = -1;
+		invoke_rcu_kthread();
+		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
+	} else
+		RCU_TRACE(rcu_initiate_exp_boost_trace());
+	raw_local_irq_restore(flags);
+}
+
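+/* Delay, in jiffies, from grace-period start until boosting begins, */
+/* converted from the CONFIG_RCU_BOOST_DELAY value in milliseconds. */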
+#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
+
+/*
+ * Do priority-boost accounting for the start of a new grace period.
+ */
+static void rcu_preempt_boost_start_gp(void)
+{
+	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
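+	/* Reset only positive values so that an expedited boost (-1) in */
+	/* progress is not forgotten across the grace-period boundary. */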
+	if (rcu_preempt_ctrlblk.boosted_this_gp > 0)
+		rcu_preempt_ctrlblk.boosted_this_gp = 0;
+}
+
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * If there is no RCU priority boosting, we don't boost.
+ */
+static int rcu_boost(void)
+{
+	return 0;
+}
+
+/*
+ * If there is no RCU priority boosting, we don't initiate boosting,
+ * but we do indicate whether there are blocked readers blocking the
+ * current grace period.
+ */
+static int rcu_initiate_boost(void)
+{
+	return rcu_preempt_blocked_readers_cgp();
+}
+
+/*
+ * If there is no RCU priority boosting, we don't initiate expedited boosting.
+ */
+static void rcu_initiate_expedited_boost(void)
+{
+}
+
+/*
+ * If there is no RCU priority boosting, nothing to do at grace-period start.
+ */
+static void rcu_preempt_boost_start_gp(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
@@ -148,11 +414,14 @@ static void rcu_preempt_cpu_qs(void)
 	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
 	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 
+	/* If there is no GP then there is nothing more to do. */
+	if (!rcu_preempt_gp_in_progress())
+		return;
 	/*
-	 * If there is no GP, or if blocked readers are still blocking GP,
-	 * then there is nothing more to do.
+	 * Check up on boosting.  If there are still readers blocking
+	 * the current grace period, leave.
 	 */
-	if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
+	if (rcu_initiate_boost())
 		return;
 
 	/* Advance callbacks. */
@@ -164,9 +433,9 @@ static void rcu_preempt_cpu_qs(void)
 	if (!rcu_preempt_blocked_readers_any())
 		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;
 
-	/* If there are done callbacks, make RCU_SOFTIRQ process them. */
+	/* If there are done callbacks, cause them to be invoked. */
 	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_kthread();
 }
 
 /*
@@ -178,12 +447,16 @@ static void rcu_preempt_start_gp(void)
 
 	/* Official start of GP. */
 	rcu_preempt_ctrlblk.gpnum++;
+	RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
 
 	/* Any blocked RCU readers block new GP. */
 	if (rcu_preempt_blocked_readers_any())
 		rcu_preempt_ctrlblk.gp_tasks =
 			rcu_preempt_ctrlblk.blkd_tasks.next;
 
+	/* Set up for RCU priority boosting. */
+	rcu_preempt_boost_start_gp();
+
 	/* If there is no running reader, CPU is done with GP. */
 	if (!rcu_preempt_running_reader())
 		rcu_preempt_cpu_qs();
@@ -304,14 +577,16 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	empty = !rcu_preempt_blocked_readers_cgp();
 	empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
-	np = t->rcu_node_entry.next;
-	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
-		np = NULL;
+	np = rcu_next_node_entry(t);
 	list_del(&t->rcu_node_entry);
 	if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
 		rcu_preempt_ctrlblk.gp_tasks = np;
 	if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
 		rcu_preempt_ctrlblk.exp_tasks = np;
+#ifdef CONFIG_RCU_BOOST
+	if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
+		rcu_preempt_ctrlblk.boost_tasks = np;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&t->rcu_node_entry);
 
 	/*
@@ -331,6 +606,14 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
 			rcu_report_exp_done();
 	}
+#ifdef CONFIG_RCU_BOOST
+	/* Unboost if we were boosted. */
+	if (special & RCU_READ_UNLOCK_BOOSTED) {
+		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
+		rt_mutex_unlock(t->rcu_boost_mutex);
+		t->rcu_boost_mutex = NULL;
+	}
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	local_irq_restore(flags);
 }
 
@@ -374,7 +657,7 @@ static void rcu_preempt_check_callbacks(void)
 		rcu_preempt_cpu_qs();
 	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
 	    rcu_preempt_ctrlblk.rcb.donetail)
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_kthread();
 	if (rcu_preempt_gp_in_progress() &&
 	    rcu_cpu_blocking_cur_gp() &&
 	    rcu_preempt_running_reader())
@@ -383,7 +666,7 @@ static void rcu_preempt_check_callbacks(void)
 
 /*
  * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
- * update, so this is invoked from __rcu_process_callbacks() to
+ * update, so this is invoked from rcu_process_callbacks() to
  * handle that case.  Of course, it is invoked for all flavors of
  * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
@@ -400,7 +683,7 @@ static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
 */
 static void rcu_preempt_process_callbacks(void)
 {
-	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
 }
 
 /*
@@ -417,6 +700,7 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	local_irq_save(flags);
 	*rcu_preempt_ctrlblk.nexttail = head;
 	rcu_preempt_ctrlblk.nexttail = &head->next;
+	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
 	rcu_preempt_start_gp();  /* checks to see if GP needed. */
 	local_irq_restore(flags);
 }
@@ -532,6 +816,8 @@ void synchronize_rcu_expedited(void)
 
 	/* Wait for tail of ->blkd_tasks list to drain. */
-	if (rcu_preempted_readers_exp())
-		wait_event(sync_rcu_preempt_exp_wq,
-			   !rcu_preempted_readers_exp());
+	if (rcu_preempted_readers_exp()) {
+		rcu_initiate_expedited_boost();
+		wait_event(sync_rcu_preempt_exp_wq,
+			   !rcu_preempted_readers_exp());
+	}
 
@@ -572,6 +858,27 @@ void exit_rcu(void)
 
 #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
 
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * Because preemptible RCU does not exist, it is not necessary to
+ * dump out its statistics.
+ */
+static void show_tiny_preempt_stats(struct seq_file *m)
+{
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+/*
+ * Because preemptible RCU does not exist, it is never necessary to
+ * boost preempted RCU readers.
+ */
+static int rcu_boost(void)
+{
+	return 0;
+}
+
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -599,17 +906,116 @@ static void rcu_preempt_process_callbacks(void)
 #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-
 #include <linux/kernel_stat.h>
 
 /*
  * During boot, we forgive RCU lockdep issues.  After this function is
  * invoked, we start taking RCU lockdep issues seriously.
  */
-void rcu_scheduler_starting(void)
+void __init rcu_scheduler_starting(void)
 {
 	WARN_ON(nr_context_switches() > 0);
 	rcu_scheduler_active = 1;
 }
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else /* #ifdef CONFIG_RCU_BOOST */
+#define RCU_BOOST_PRIO 1
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+#ifdef CONFIG_RCU_TRACE
+
+#ifdef CONFIG_RCU_BOOST
+
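+/*
+ * Record why rcu_initiate_boost() balked, testing the same conditions
+ * in the same order as that function checks them.
+ */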
+static void rcu_initiate_boost_trace(void)
+{
+	if (rcu_preempt_ctrlblk.gp_tasks == NULL)
+		rcu_preempt_ctrlblk.n_normal_balk_gp_tasks++;
+	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
+		rcu_preempt_ctrlblk.n_normal_balk_boost_tasks++;
+	else if (rcu_preempt_ctrlblk.boosted_this_gp != 0)
+		rcu_preempt_ctrlblk.n_normal_balk_boosted++;
+	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
+		rcu_preempt_ctrlblk.n_normal_balk_notyet++;
+	else
+		rcu_preempt_ctrlblk.n_normal_balk_nos++;
+}
+
+static void rcu_initiate_exp_boost_trace(void)
+{
+	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
+		rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks++;
+	else
+		rcu_preempt_ctrlblk.n_exp_balk_nos++;
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
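+/*
+ * Subtract n from the trace-only callback-queue count.  Interrupts are
+ * disabled across the update to keep it atomic with respect to the
+ * qlen increment in call_rcu() and friends.
+ */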
+static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	rcp->qlen -= n;
+	raw_local_irq_restore(flags);
+}
+
+/*
+ * Dump statistics for TINY_RCU, such as they are.
+ */
+static int show_tiny_stats(struct seq_file *m, void *unused)
+{
+	show_tiny_preempt_stats(m);
+	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
+	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
+	return 0;
+}
+
+static int show_tiny_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_tiny_stats, NULL);
+}
+
+static const struct file_operations show_tiny_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = show_tiny_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *rcudir;
+
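+/*
+ * Create the debugfs "rcu" directory and the "rcudata" file within it,
+ * typically visible as /sys/kernel/debug/rcu/rcudata.
+ */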
+static int __init rcutiny_trace_init(void)
+{
+	struct dentry *retval;
+
+	rcudir = debugfs_create_dir("rcu", NULL);
+	if (!rcudir)
+		goto free_out;
+	retval = debugfs_create_file("rcudata", 0444, rcudir,
+				     NULL, &show_tiny_stats_fops);
+	if (!retval)
+		goto free_out;
+	return 0;
+free_out:
+	debugfs_remove_recursive(rcudir);
+	return 1;
+}
+
+static void __exit rcutiny_trace_cleanup(void)
+{
+	debugfs_remove_recursive(rcudir);
+}
+
+module_init(rcutiny_trace_init);
+module_exit(rcutiny_trace_cleanup);
+
+MODULE_AUTHOR("Paul E. McKenney");
+MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
+MODULE_LICENSE("GPL");
+
+#endif /* #ifdef CONFIG_RCU_TRACE */