@@ -873,6 +873,29 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
 }
 
+/*
+ * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
+ * for architectures that do not implement trigger_all_cpu_backtrace().
+ * The NMI-triggered stack traces are more accurate because they are
+ * printed by the target CPU.
+ */
+static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+{
+	int cpu;
+	unsigned long flags;
+	struct rcu_node *rnp;
+
+	rcu_for_each_leaf_node(rsp, rnp) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		if (rnp->qsmask != 0) {
+			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+				if (rnp->qsmask & (1UL << cpu))
+					dump_cpu_task(rnp->grplo + cpu);
+		}
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
+}
+
 static void print_other_cpu_stall(struct rcu_state *rsp)
 {
 	int cpu;
@@ -929,7 +952,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	if (ndetected == 0)
 		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
-		dump_stack();
+		rcu_dump_cpu_stacks(rsp);
 
 	/* Complain about tasks blocking the grace period. */
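
As context for the call-site change (this note is not part of the patch): trigger_all_cpu_backtrace() returns false on architectures that do not provide NMI-backtrace support, so before this change those architectures fell back to dump_stack(), which prints only the stack of the CPU that detected the stall. The new rcu_dump_cpu_stacks() instead walks every leaf rcu_node while holding rnp->lock (which protects ->qsmask) and dumps the stack of each CPU still blocking the grace period — trading the accuracy of an NMI-captured trace, as the comment notes, for coverage of the CPUs that are actually stalled.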
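
If the ->qsmask walk is unfamiliar, here is a minimal userspace sketch of the same bit-to-CPU mapping. The names toy_leaf_node and toy_dump_cpu_task() are hypothetical stand-ins for rcu_node and dump_cpu_task(); as in the patch, grplo and grphi bound the CPU range covered by one leaf node, and bit n of qsmask corresponds to CPU grplo + n.

	/* Toy model (not kernel code) of the bit walk in rcu_dump_cpu_stacks(). */
	#include <stdio.h>

	struct toy_leaf_node {
		unsigned long qsmask;	/* one bit per CPU in [grplo, grphi] */
		int grplo;		/* lowest CPU covered by this node */
		int grphi;		/* highest CPU covered by this node */
	};

	static void toy_dump_cpu_task(int cpu)
	{
		/* Stand-in for dump_cpu_task(). */
		printf("dumping stack of CPU %d\n", cpu);
	}

	int main(void)
	{
		/* Leaf covering CPUs 16-31; CPUs 17 and 30 have not yet
		 * reported a quiescent state, so their bits are set. */
		struct toy_leaf_node rnp = {
			.qsmask = (1UL << (17 - 16)) | (1UL << (30 - 16)),
			.grplo = 16,
			.grphi = 31,
		};
		int cpu;

		/* Same loop shape as the patch: iterate bit offsets within
		 * the node's CPU span and translate set bits back to CPU
		 * numbers by adding grplo. */
		for (cpu = 0; cpu <= rnp.grphi - rnp.grplo; cpu++)
			if (rnp.qsmask & (1UL << cpu))
				toy_dump_cpu_task(rnp.grplo + cpu);
		return 0;
	}

Running this prints the stacks (here, just a message) for CPUs 17 and 30 only — the point being that the per-node bitmask lets the stall code visit exactly the holdout CPUs without scanning every possible CPU in the system.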