@@ -854,6 +854,12 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   struct rq_iterator *iterator);
 #endif

+#ifdef CONFIG_CGROUP_CPUACCT
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
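[Editor's note: the #else stub above is what lets scheduler code call cpuacct_charge() unconditionally; the declaration sits just before the sched_*.c includes, and when CONFIG_CGROUP_CPUACCT is off the empty inline compiles away to nothing. A minimal sketch of such a call site, with a hypothetical accounting function (the patch's real call sites live in the scheduler's runtime-accounting path, not shown in this hunk):

        /* sketch: charge the running task as its runtime is accounted */
        static void account_exec_runtime(struct task_struct *curr, u64 delta_exec)
        {
                curr->se.sum_exec_runtime += delta_exec;
                cpuacct_charge(curr, delta_exec);       /* no-op without CPUACCT */
        }
]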
@@ -7221,38 +7227,12 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
        return (u64) tg->shares;
 }

-static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
-{
-       struct task_group *tg = cgroup_tg(cgrp);
-       unsigned long flags;
-       u64 res = 0;
-       int i;
-
-       for_each_possible_cpu(i) {
-               /*
-                * Lock to prevent races with updating 64-bit counters
-                * on 32-bit arches.
-                */
-               spin_lock_irqsave(&cpu_rq(i)->lock, flags);
-               res += tg->se[i]->sum_exec_runtime;
-               spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
-       }
-       /* Convert from ns to ms */
-       do_div(res, NSEC_PER_MSEC);
-
-       return res;
-}
-
 static struct cftype cpu_files[] = {
        {
                .name = "shares",
                .read_uint = cpu_shares_read_uint,
                .write_uint = cpu_shares_write_uint,
        },
-       {
-               .name = "usage",
-               .read_uint = cpu_usage_read,
-       },
 };

 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
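[Editor's note: there is a unit change buried in this hunk. The removed cpu.usage handler divided by NSEC_PER_MSEC and so reported milliseconds, while the cpuacct.usage file introduced below returns the raw nanosecond total. A consumer that depended on the old units has to convert; a trivial sketch (hypothetical helper, not part of the patch):

        /* convert a cpuacct.usage reading (ns) to the old millisecond units */
        static inline u64 cpuacct_usage_to_msecs(u64 ns)
        {
                do_div(ns, NSEC_PER_MSEC);      /* same conversion the old file did */
                return ns;
        }
]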
@@ -7272,3 +7252,126 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 };

 #endif /* CONFIG_FAIR_CGROUP_SCHED */
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * CPU accounting code for task groups.
+ *
+ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
+ * (balbir@in.ibm.com).
+ */
+
+/* track cpu usage of a group of tasks */
+struct cpuacct {
+       struct cgroup_subsys_state css;
+       /* cpuusage holds pointer to a u64-type object on every cpu */
+       u64 *cpuusage;
+};
+
+struct cgroup_subsys cpuacct_subsys;
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+{
+       return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+                           struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+       return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+                           struct cpuacct, css);
+}
+
+/* create a new cpu accounting group */
+static struct cgroup_subsys_state *cpuacct_create(
+       struct cgroup_subsys *ss, struct cgroup *cont)
+{
+       struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+       if (!ca)
+               return ERR_PTR(-ENOMEM);
+
+       ca->cpuusage = alloc_percpu(u64);
+       if (!ca->cpuusage) {
+               kfree(ca);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return &ca->css;
+}
+
+/* destroy an existing cpu accounting group */
+static void cpuacct_destroy(struct cgroup_subsys *ss,
+                           struct cgroup *cont)
+{
+       struct cpuacct *ca = cgroup_ca(cont);
+
+       free_percpu(ca->cpuusage);
+       kfree(ca);
+}
+
+/* return total cpu usage (in nanoseconds) of a group */
+static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+{
+       struct cpuacct *ca = cgroup_ca(cont);
+       u64 totalcpuusage = 0;
+       int i;
+
+       for_each_possible_cpu(i) {
+               u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+               /*
+                * Take rq->lock to make 64-bit addition safe on 32-bit
+                * platforms.
+                */
+               spin_lock_irq(&cpu_rq(i)->lock);
+               totalcpuusage += *cpuusage;
+               spin_unlock_irq(&cpu_rq(i)->lock);
+       }
+
+       return totalcpuusage;
+}
+
+static struct cftype files[] = {
+       {
+               .name = "usage",
+               .read_uint = cpuusage_read,
+       },
+};
+
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+       return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+/*
+ * charge this task's execution time to its accounting group.
+ *
+ * called with rq->lock held.
+ */
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+{
+       struct cpuacct *ca;
+
+       if (!cpuacct_subsys.active)
+               return;
+
+       ca = task_ca(tsk);
+       if (ca) {
+               u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
+
+               *cpuusage += cputime;
+       }
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+       .name = "cpuacct",
+       .create = cpuacct_create,
+       .destroy = cpuacct_destroy,
+       .populate = cpuacct_populate,
+       .subsys_id = cpuacct_subsys_id,
+};
+#endif /* CONFIG_CGROUP_CPUACCT */
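
[Editor's note: with the controller compiled in, mounting the cgroup filesystem with the cpuacct subsystem exposes a cpuacct.usage file in each group directory, containing the group's cumulative CPU time in nanoseconds. A small userspace sketch of reading it (the /cgroups mount point is an assumption, not mandated by the kernel):

        /* read a group's cumulative cpu time from its cpuacct.usage file */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long ns;
                FILE *f = fopen("/cgroups/cpuacct.usage", "r");

                if (!f)
                        return 1;
                if (fscanf(f, "%llu", &ns) != 1) {
                        fclose(f);
                        return 1;
                }
                fclose(f);
                printf("cpu time: %llu ns\n", ns);
                return 0;
        }
]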