@@ -4134,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_copy(cpus, cpu_online_mask);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4297,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_copy(cpus, cpu_online_mask);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4694,7 +4694,7 @@ int select_nohz_load_balancer(int stop_tick)
 		cpumask_set_cpu(cpu, nohz.cpu_mask);
 
 		/* time for ilb owner also to sleep */
-		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+		if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
 				atomic_set(&nohz.load_balancer, -1);
 			return 0;
@@ -7093,7 +7093,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -7115,7 +7115,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
-	if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+	if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		struct task_struct *mt = rq->migration_thread;
 
@@ -7269,19 +7269,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 again:
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
 		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 			goto move;
 
 	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
 	if (dest_cpu < nr_cpu_ids)
 		goto move;
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
 		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -7310,7 +7310,7 @@ move:
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -7564,7 +7564,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
-	int i, cpu_num = num_online_cpus();
+	int i, cpu_num = num_possible_cpus();
 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
 	char buf[32];
 
@@ -7574,7 +7574,7 @@ static void register_sched_domain_sysctl(void)
 	if (entry == NULL)
 		return;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		snprintf(buf, 32, "cpu%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
 		entry->mode = 0555;
@@ -9100,7 +9100,7 @@ match1:
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
-		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
+		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
@@ -9231,8 +9231,10 @@ static int update_sched_domains(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		partition_sched_domains(1, NULL, NULL);
 		return NOTIFY_OK;
 
@@ -9279,7 +9281,7 @@ void __init sched_init_smp(void)
 #endif
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
-	arch_init_sched_domains(cpu_online_mask);
+	arch_init_sched_domains(cpu_active_mask);
 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
 	if (cpumask_empty(non_isolated_cpus))
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);