@@ -98,6 +98,9 @@ struct cpuset {
 	/* partition number for rebuild_sched_domains() */
 	int pn;
 
+	/* for custom sched domain */
+	int relax_domain_level;
+
 	/* used for walking a cpuset heirarchy */
 	struct list_head stack_list;
 };
@@ -478,6 +481,16 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
 }
 
+static void
+update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+	if (!dattr)
+		return;
+	if (dattr->relax_domain_level < c->relax_domain_level)
+		dattr->relax_domain_level = c->relax_domain_level;
+	return;
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -553,12 +566,14 @@ static void rebuild_sched_domains(void)
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
 	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
+	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
 	q = NULL;
 	csa = NULL;
 	doms = NULL;
+	dattr = NULL;
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
@@ -566,6 +581,11 @@ static void rebuild_sched_domains(void)
 		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 		if (!doms)
 			goto rebuild;
+		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
+		if (dattr) {
+			*dattr = SD_ATTR_INIT;
+			update_domain_attr(dattr, &top_cpuset);
+		}
 		*doms = top_cpuset.cpus_allowed;
 		goto rebuild;
 	}
@@ -622,6 +642,7 @@ restart:
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms)
 		goto rebuild;
+	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 
 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
@@ -644,12 +665,15 @@ restart:
 			}
 
 			cpus_clear(*dp);
+			if (dattr)
+				*(dattr + nslot) = SD_ATTR_INIT;
 			for (j = i; j < csn; j++) {
 				struct cpuset *b = csa[j];
 
 				if (apn == b->pn) {
 					cpus_or(*dp, *dp, b->cpus_allowed);
 					b->pn = -1;
+					update_domain_attr(dattr, b);
 				}
 			}
 			nslot++;
@@ -660,7 +684,7 @@ restart:
 rebuild:
 	/* Have scheduler rebuild sched domains */
 	get_online_cpus();
-	partition_sched_domains(ndoms, doms);
+	partition_sched_domains(ndoms, doms, dattr);
 	put_online_cpus();
 
 done:
@@ -668,6 +692,7 @@ done:
 	kfifo_free(q);
 	kfree(csa);
 	/* Don't kfree(doms) -- partition_sched_domains() does that. */
+	/* Don't kfree(dattr) -- partition_sched_domains() does that. */
 }
 
 static inline int started_after_time(struct task_struct *t1,
@@ -1011,6 +1036,21 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
 	return 0;
 }
 
+static int update_relax_domain_level(struct cpuset *cs, char *buf)
+{
+	int val = simple_strtol(buf, NULL, 10);
+
+	if (val < 0)
+		val = -1;
+
+	if (val != cs->relax_domain_level) {
+		cs->relax_domain_level = val;
+		rebuild_sched_domains();
+	}
+
+	return 0;
+}
+
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
  * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
@@ -1202,6 +1242,7 @@ typedef enum {
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
 	FILE_SCHED_LOAD_BALANCE,
+	FILE_SCHED_RELAX_DOMAIN_LEVEL,
 	FILE_MEMORY_PRESSURE_ENABLED,
 	FILE_MEMORY_PRESSURE,
 	FILE_SPREAD_PAGE,
@@ -1256,6 +1297,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
 	case FILE_SCHED_LOAD_BALANCE:
 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		retval = update_relax_domain_level(cs, buffer);
+		break;
 	case FILE_MEMORY_MIGRATE:
 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
 		break;
@@ -1354,6 +1398,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	case FILE_SCHED_LOAD_BALANCE:
 		*s++ = is_sched_load_balance(cs) ? '1' : '0';
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		s += sprintf(s, "%d", cs->relax_domain_level);
+		break;
 	case FILE_MEMORY_MIGRATE:
 		*s++ = is_memory_migrate(cs) ? '1' : '0';
 		break;
@@ -1424,6 +1471,13 @@ static struct cftype cft_sched_load_balance = {
 	.private = FILE_SCHED_LOAD_BALANCE,
 };
 
+static struct cftype cft_sched_relax_domain_level = {
+	.name = "sched_relax_domain_level",
+	.read = cpuset_common_file_read,
+	.write = cpuset_common_file_write,
+	.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+};
+
 static struct cftype cft_memory_migrate = {
 	.name = "memory_migrate",
 	.read = cpuset_common_file_read,
@@ -1475,6 +1529,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
 		return err;
+	if ((err = cgroup_add_file(cont, ss,
+			&cft_sched_relax_domain_level)) < 0)
+		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
 		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
@@ -1559,6 +1616,7 @@ static struct cgroup_subsys_state *cpuset_create(
 	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
+	cs->relax_domain_level = -1;
 
 	cs->parent = parent;
 	number_of_cpusets++;
@@ -1631,6 +1689,7 @@ int __init cpuset_init(void)
 	fmeter_init(&top_cpuset.fmeter);
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
+	top_cpuset.relax_domain_level = -1;
 
 	err = register_filesystem(&cpuset_fs_type);
 	if (err < 0)
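
For reference, a minimal userspace sketch of how the file added above is driven. This is an illustration under stated assumptions, not part of the patch: the /dev/cpuset mount point and the cpuset name "myset" are hypothetical. Only the file name sched_relax_domain_level and its behavior come from the hunks above: update_relax_domain_level() normalizes any negative input to -1 (the default installed by cpuset_create() and cpuset_init()) and calls rebuild_sched_domains() on change, while cpuset_common_file_read() reports the current value as a plain decimal.

	/*
	 * Hypothetical usage example (not part of the patch).
	 * Assumes the cpuset filesystem is mounted at /dev/cpuset
	 * and that a cpuset named "myset" already exists.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/dev/cpuset/myset/sched_relax_domain_level";
		FILE *f = fopen(path, "w");
		int level;

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Negative input is stored as -1 by update_relax_domain_level(). */
		fprintf(f, "1\n");
		fclose(f);

		f = fopen(path, "r");
		if (!f) {
			perror("fopen");
			return 1;
		}
		/* cpuset_common_file_read() emits the level via sprintf("%d"). */
		if (fscanf(f, "%d", &level) == 1)
			printf("sched_relax_domain_level = %d\n", level);
		fclose(f);
		return 0;
	}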