@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  * default: 0.25ms
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * Inject some fuzzyness into changing the per-cpu group shares
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-	unsigned long weight, rq_weight = 0, shares = 0;
+	unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
 	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		weight = tg->cfs_rq[i]->load.weight;
 		usd_rq_weight[i] = weight;
 
+		rq_weight += weight;
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		if (!weight)
 			weight = NICE_0_LOAD;
 
-		rq_weight += weight;
+		sum_weight += weight;
 		shares += tg->cfs_rq[i]->shares;
 	}
 
+	if (!rq_weight)
+		rq_weight = sum_weight;
+
 	if ((!shares && rq_weight) || shares > tg->shares)
 		shares = tg->shares;
 
@@ -1810,6 +1815,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
+static int get_update_sysctl_factor(void);
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfuly executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -1967,20 +1988,6 @@ inline int task_curr(const struct task_struct *p)
 	return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfuly executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
 				       int oldprio, int running)
@@ -2060,29 +2067,13 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
-	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
 	struct cfs_rq *old_cfsrq = task_cfs_rq(p),
 		      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
-	u64 clock_offset;
-
-	clock_offset = old_rq->clock - new_rq->clock;
 
 	trace_sched_migrate_task(p, new_cpu);
 
-#ifdef CONFIG_SCHEDSTATS
-	if (p->se.wait_start)
-		p->se.wait_start -= clock_offset;
-	if (p->se.sleep_start)
-		p->se.sleep_start -= clock_offset;
-	if (p->se.block_start)
-		p->se.block_start -= clock_offset;
-#endif
 	if (old_cpu != new_cpu) {
 		p->se.nr_migrations++;
-#ifdef CONFIG_SCHEDSTATS
-		if (task_hot(p, old_rq->clock, NULL))
-			schedstat_inc(p, se.nr_forced2_migrations);
-#endif
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
 				     1, 1, NULL, 0);
 	}
@@ -2323,6 +2314,14 @@ void task_oncpu_function_call(struct task_struct *p,
 	preempt_enable();
 }
 
+#ifdef CONFIG_SMP
+static inline
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+{
+	return p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+}
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2374,17 +2373,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 	p->state = TASK_WAKING;
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
 
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu) {
-		local_irq_save(flags);
-		rq = cpu_rq(cpu);
-		update_rq_clock(rq);
+	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
-		local_irq_restore(flags);
-	}
-	rq = task_rq_lock(p, &flags);
+
+	rq = __task_rq_lock(p);
+	update_rq_clock(rq);
 
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
@@ -2499,7 +2495,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap = 0;
 	p->se.start_runtime = 0;
 	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
-	p->se.avg_running = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
@@ -2521,7 +2516,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_failed_migrations_running = 0;
 	p->se.nr_failed_migrations_hot = 0;
 	p->se.nr_forced_migrations = 0;
-	p->se.nr_forced2_migrations = 0;
 
 	p->se.nr_wakeups = 0;
 	p->se.nr_wakeups_sync = 0;
@@ -2558,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
-	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2592,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-	local_irq_save(flags);
-	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
-	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2631,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
-
-	if (!p->sched_class->task_new || !current->se.on_rq) {
-		activate_task(rq, p, 0);
-	} else {
-		/*
-		 * Let the scheduling class do new task startup
-		 * management (if any):
-		 */
-		p->sched_class->task_new(rq, p);
-		inc_nr_running(rq);
-	}
+	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -3156,7 +3139,7 @@ out:
 void sched_exec(void)
 {
 	int new_cpu, this_cpu = get_cpu();
-	new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
+	new_cpu = select_task_rq(current, SD_BALANCE_EXEC, 0);
 	put_cpu();
 	if (new_cpu != this_cpu)
 		sched_migrate_task(current, new_cpu);
@@ -3172,10 +3155,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
 	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
-	/*
-	 * Note that idle threads have a prio of MAX_PRIO, for this test
-	 * to be always true for them.
-	 */
 	check_preempt_curr(this_rq, p, 0);
 }
 
@@ -4134,7 +4113,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_copy(cpus, cpu_online_mask);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4297,7 +4276,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_copy(cpus, cpu_online_mask);
+	cpumask_copy(cpus, cpu_active_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4694,7 +4673,7 @@ int select_nohz_load_balancer(int stop_tick)
 		cpumask_set_cpu(cpu, nohz.cpu_mask);
 
 		/* time for ilb owner also to sleep */
-		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+		if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
 				atomic_set(&nohz.load_balancer, -1);
 			return 0;
@@ -5396,13 +5375,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+	if (prev->state == TASK_RUNNING) {
+		u64 runtime = prev->se.sum_exec_runtime;
 
-	update_avg(&p->se.avg_running, runtime);
+		runtime -= prev->se.prev_sum_exec_runtime;
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
 
-	if (p->state == TASK_RUNNING) {
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5412,12 +5392,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-		update_avg(&p->se.avg_overlap, runtime);
-	} else {
-		update_avg(&p->se.avg_running, 0);
+		update_avg(&prev->se.avg_overlap, runtime);
 	}
-	p->sched_class->put_prev_task(rq, p);
+	prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
@@ -6631,6 +6608,8 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
 	struct task_struct *p;
+	unsigned long flags;
+	struct rq *rq;
 	int retval;
 
 	get_online_cpus();
@@ -6645,7 +6624,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	if (retval)
 		goto out_unlock;
 
+	rq = task_rq_lock(p, &flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+	task_rq_unlock(rq, &flags);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -6883,6 +6864,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
 	struct task_struct *p;
 	unsigned int time_slice;
+	unsigned long flags;
+	struct rq *rq;
 	int retval;
 	struct timespec t;
 
@@ -6899,7 +6882,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;
 
-	time_slice = p->sched_class->get_rr_interval(p);
+	rq = task_rq_lock(p, &flags);
+	time_slice = p->sched_class->get_rr_interval(rq, p);
+	task_rq_unlock(rq, &flags);
 
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
@@ -7000,7 +6985,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(idle);
 	idle->se.exec_start = sched_clock();
 
-	idle->prio = idle->normal_prio = MAX_PRIO;
 	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
 	__set_task_cpu(idle, cpu);
 
@@ -7041,22 +7025,43 @@ cpumask_var_t nohz_cpu_mask;
  *
 * This idea comes from the SD scheduler of Con Kolivas:
  */
-static inline void sched_init_granularity(void)
+static int get_update_sysctl_factor(void)
 {
-	unsigned int factor = 1 + ilog2(num_online_cpus());
-	const unsigned long limit = 200000000;
+	unsigned int cpus = min_t(int, num_online_cpus(), 8);
+	unsigned int factor;
+
+	switch (sysctl_sched_tunable_scaling) {
+	case SCHED_TUNABLESCALING_NONE:
+		factor = 1;
+		break;
+	case SCHED_TUNABLESCALING_LINEAR:
+		factor = cpus;
+		break;
+	case SCHED_TUNABLESCALING_LOG:
+	default:
+		factor = 1 + ilog2(cpus);
+		break;
+	}
 
-	sysctl_sched_min_granularity *= factor;
-	if (sysctl_sched_min_granularity > limit)
-		sysctl_sched_min_granularity = limit;
+	return factor;
+}
 
-	sysctl_sched_latency *= factor;
-	if (sysctl_sched_latency > limit)
-		sysctl_sched_latency = limit;
+static void update_sysctl(void)
+{
+	unsigned int factor = get_update_sysctl_factor();
 
-	sysctl_sched_wakeup_granularity *= factor;
+#define SET_SYSCTL(name) \
+	(sysctl_##name = (factor) * normalized_sysctl_##name)
+	SET_SYSCTL(sched_min_granularity);
+	SET_SYSCTL(sched_latency);
+	SET_SYSCTL(sched_wakeup_granularity);
+	SET_SYSCTL(sched_shares_ratelimit);
+#undef SET_SYSCTL
+}
 
-	sysctl_sched_shares_ratelimit *= factor;
+static inline void sched_init_granularity(void)
+{
+	update_sysctl();
 }
 
 #ifdef CONFIG_SMP
@@ -7093,7 +7098,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
-	if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -7115,7 +7120,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
-	if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+	if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		struct task_struct *mt = rq->migration_thread;
 
@@ -7269,19 +7274,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 again:
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
 		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 			goto move;
 
 	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
 	if (dest_cpu < nr_cpu_ids)
 		goto move;
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
 		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
+		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -7310,7 +7315,7 @@ move:
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -7563,7 +7568,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
-	int i, cpu_num = num_online_cpus();
+	int i, cpu_num = num_possible_cpus();
 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
 	char buf[32];
 
@@ -7573,7 +7578,7 @@ static void register_sched_domain_sysctl(void)
 	if (entry == NULL)
 		return;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		snprintf(buf, 32, "cpu%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
 		entry->mode = 0555;
@@ -7703,7 +7708,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irq(&rq->lock);
 		update_rq_clock(rq);
 		deactivate_task(rq, rq->idle, 0);
-		rq->idle->static_prio = MAX_PRIO;
 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
@@ -9099,7 +9103,7 @@ match1:
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
-		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
+		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
@@ -9230,8 +9234,10 @@ static int update_sched_domains(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		partition_sched_domains(1, NULL, NULL);
 		return NOTIFY_OK;
 
@@ -9278,7 +9284,7 @@ void __init sched_init_smp(void)
 #endif
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
-	arch_init_sched_domains(cpu_online_mask);
+	arch_init_sched_domains(cpu_active_mask);
 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
 	if (cpumask_empty(non_isolated_cpus))
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9842,13 +9848,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		se = kzalloc_node(sizeof(struct sched_entity),
 				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
-			goto err;
+			goto err_free_rq;
 
 		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
 
+ err_free_rq:
+	kfree(cfs_rq);
  err:
 	return 0;
 }
@@ -9930,13 +9938,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
 				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
-			goto err;
+			goto err_free_rq;
 
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
 
+ err_free_rq:
+	kfree(rt_rq);
  err:
 	return 0;
 }
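
Editor's note on the @@ -7041,22 +7025,43 @@ hunk (not part of the patch): instead of multiplying the sched_* tunables by a CPU-count factor once at boot, the patch keeps untouched normalized_sysctl_* baselines and recomputes the effective values from them in update_sysctl(), with the factor chosen by sysctl_sched_tunable_scaling. The following is a minimal standalone userspace sketch of that arithmetic only; the SCHED_TUNABLESCALING_* names and the min_t(..., 8) clamp come from the hunk above, while scale_factor(), ilog2_u32() and the 6 ms baseline are illustrative assumptions, not kernel code.

#include <stdio.h>

enum {
	SCHED_TUNABLESCALING_NONE,	/* factor = 1 */
	SCHED_TUNABLESCALING_LINEAR,	/* factor = number of CPUs (clamped) */
	SCHED_TUNABLESCALING_LOG,	/* factor = 1 + ilog2(CPUs), the default */
};

static unsigned int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int scale_factor(int scaling, unsigned int online_cpus)
{
	/* mirrors min_t(int, num_online_cpus(), 8) from the patch */
	unsigned int cpus = online_cpus < 8 ? online_cpus : 8;

	switch (scaling) {
	case SCHED_TUNABLESCALING_NONE:
		return 1;
	case SCHED_TUNABLESCALING_LINEAR:
		return cpus;
	case SCHED_TUNABLESCALING_LOG:
	default:
		return 1 + ilog2_u32(cpus);
	}
}

int main(void)
{
	/* assumed example baseline: 6 ms expressed in nanoseconds */
	unsigned long long normalized_sched_latency = 6000000ULL;
	unsigned int cpus;

	for (cpus = 1; cpus <= 16; cpus *= 2)
		printf("%2u cpus -> effective latency %llu ns\n", cpus,
		       scale_factor(SCHED_TUNABLESCALING_LOG, cpus) *
		       normalized_sched_latency);
	return 0;
}

With logarithmic scaling the factor grows slowly (1, 2, 3, 4, 4 for 1, 2, 4, 8, 16 CPUs here), and because it is always applied to the unmodified normalized_* baselines the computation can be repeated safely whenever the CPU count changes, unlike the old in-place "*= factor" scheme that could only run once.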