@@ -529,6 +529,7 @@ struct rq {
 	int push_cpu;
 	/* cpu of this runqueue: */
 	int cpu;
+	int online;
 
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
@@ -1498,6 +1499,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 #endif
 
 #define sched_class_highest (&rt_sched_class)
+#define for_each_class(class) \
+   for (class = sched_class_highest; class; class = class->next)
 
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
@@ -6065,6 +6068,36 @@ static void unregister_sched_domain_sysctl(void)
 }
 #endif
 
+static void set_rq_online(struct rq *rq)
+{
+	if (!rq->online) {
+		const struct sched_class *class;
+
+		cpu_set(rq->cpu, rq->rd->online);
+		rq->online = 1;
+
+		for_each_class(class) {
+			if (class->rq_online)
+				class->rq_online(rq);
+		}
+	}
+}
+
+static void set_rq_offline(struct rq *rq)
+{
+	if (rq->online) {
+		const struct sched_class *class;
+
+		for_each_class(class) {
+			if (class->rq_offline)
+				class->rq_offline(rq);
+		}
+
+		cpu_clear(rq->cpu, rq->rd->online);
+		rq->online = 0;
+	}
+}
+
 /*
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
@@ -6102,7 +6135,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_set(cpu, rq->rd->online);
+
+			set_rq_online(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
@@ -6163,7 +6197,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_lock_irqsave(&rq->lock, flags);
 		if (rq->rd) {
 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_clear(cpu, rq->rd->online);
+			set_rq_offline(rq);
 		}
 		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
@@ -6385,20 +6419,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
 	unsigned long flags;
-	const struct sched_class *class;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next) {
-			if (class->leave_domain)
-				class->leave_domain(rq);
-		}
+		if (cpu_isset(rq->cpu, old_rd->online))
+			set_rq_offline(rq);
 
 		cpu_clear(rq->cpu, old_rd->span);
-		cpu_clear(rq->cpu, old_rd->online);
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -6409,12 +6439,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 	cpu_set(rq->cpu, rd->span);
 	if (cpu_isset(rq->cpu, cpu_online_map))
-		cpu_set(rq->cpu, rd->online);
-
-	for (class = sched_class_highest; class; class = class->next) {
-		if (class->join_domain)
-			class->join_domain(rq);
-	}
+		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -7824,6 +7849,7 @@ void __init sched_init(void)
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
 		rq->cpu = i;
+		rq->online = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
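
The hunks above replace the old per-class join_domain/leave_domain callbacks with a single guarded rq->online transition that walks every scheduling class through the new for_each_class() iterator. Below is a stand-alone user-space sketch of that dispatch pattern, not kernel code: the stub rt/fair classes, the printf bodies, and main() are illustrative assumptions added so the snippet compiles and runs on its own.

/*
 * Minimal model of the patch's pattern: classes form a singly linked
 * list, and set_rq_online()/set_rq_offline() flip rq->online exactly
 * once while invoking each class's optional hook.
 */
#include <stdio.h>

struct rq;

struct sched_class {
	const struct sched_class *next;
	void (*rq_online)(struct rq *rq);	/* optional hook */
	void (*rq_offline)(struct rq *rq);	/* optional hook */
};

struct rq {
	int cpu;
	int online;
};

static void rt_rq_online(struct rq *rq)
{
	printf("rt: cpu%d online\n", rq->cpu);
}

static void rt_rq_offline(struct rq *rq)
{
	printf("rt: cpu%d offline\n", rq->cpu);
}

/* The fair class leaves both hooks NULL; the walker must tolerate that. */
static const struct sched_class fair_sched_class = {
	.next		= NULL,
};

static const struct sched_class rt_sched_class = {
	.next		= &fair_sched_class,
	.rq_online	= rt_rq_online,
	.rq_offline	= rt_rq_offline,
};

#define sched_class_highest (&rt_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {	/* guard makes repeated calls no-ops */
		const struct sched_class *class;

		rq->online = 1;
		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}
		rq->online = 0;
	}
}

int main(void)
{
	struct rq rq = { .cpu = 0, .online = 0 };

	set_rq_online(&rq);	/* fires the rt hook once */
	set_rq_online(&rq);	/* no-op: already online */
	set_rq_offline(&rq);	/* fires the rt offline hook */
	return 0;
}

The guard on rq->online makes the transitions idempotent, which is why both the hotplug notifier paths and rq_attach_root() in the patch can call the helpers without double-invoking any class hooks.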