@@ -594,18 +594,14 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 2,
 	SCHED_FEAT_START_DEBIT		= 4,
-	SCHED_FEAT_TREE_AVG		= 8,
-	SCHED_FEAT_APPROX_AVG		= 16,
-	SCHED_FEAT_HRTICK		= 32,
-	SCHED_FEAT_DOUBLE_TICK		= 64,
+	SCHED_FEAT_HRTICK		= 8,
+	SCHED_FEAT_DOUBLE_TICK		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
 		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
 		SCHED_FEAT_START_DEBIT		* 1 |
-		SCHED_FEAT_TREE_AVG		* 0 |
-		SCHED_FEAT_APPROX_AVG		* 0 |
 		SCHED_FEAT_HRTICK		* 1 |
 		SCHED_FEAT_DOUBLE_TICK		* 0;
 
@@ -3886,7 +3882,7 @@ need_resched_nonpreemptible:
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev)))) {
+				signal_pending(prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, 1);
@@ -6811,6 +6807,10 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -6820,6 +6820,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
 	int err;
 
+	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
@@ -6924,7 +6925,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
 	int err;
 