@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+static DEFINE_SPINLOCK(sched_debug_lock);
+
 /*
  * This allows printing both to /proc/sched_debug and
  * to the console
@@ -86,6 +88,23 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 }
 #endif
 
+#ifdef CONFIG_CGROUP_SCHED
+static char group_path[PATH_MAX];
+
+static char *task_group_path(struct task_group *tg)
+{
+	/*
+	 * May be NULL if the underlying cgroup isn't fully-created yet
+	 */
+	if (!tg->css.cgroup) {
+		group_path[0] = '\0';
+		return group_path;
+	}
+	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	return group_path;
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -108,6 +127,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
+#ifdef CONFIG_CGROUP_SCHED
+	SEQ_printf(m, " %s", task_group_path(task_group(p)));
+#endif
 
 	SEQ_printf(m, "\n");
 }
@@ -144,7 +166,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	struct sched_entity *last;
 	unsigned long flags;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
+#else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#endif
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
@@ -191,7 +217,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_RT_GROUP_SCHED
+	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
+#else
 	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
+#endif
 
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
@@ -212,6 +242,7 @@ extern __read_mostly int sched_clock_running;
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
 #ifdef CONFIG_X86
 	{
@@ -266,10 +297,14 @@ static void print_cpu(struct seq_file *m, int cpu)
 
 #undef P
 #endif
+	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
+	rcu_read_lock();
 	print_rq(m, rq, cpu);
+	rcu_read_unlock();
+	spin_unlock_irqrestore(&sched_debug_lock, flags);
 }
 
 static const char *sched_tunable_scaling_names[] = {