@@ -77,6 +77,7 @@
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
+#include "workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -2267,11 +2268,55 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
-/***
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+				 bool is_sync, bool is_migrate, bool is_local,
+				 unsigned long en_flags)
+{
+	schedstat_inc(p, se.statistics.nr_wakeups);
+	if (is_sync)
+		schedstat_inc(p, se.statistics.nr_wakeups_sync);
+	if (is_migrate)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+	if (is_local)
+		schedstat_inc(p, se.statistics.nr_wakeups_local);
+	else
+		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+	activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+					int wake_flags, bool success)
+{
+	trace_sched_wakeup(p, success);
+	check_preempt_curr(rq, p, wake_flags);
+
+	p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+	if (p->sched_class->task_woken)
+		p->sched_class->task_woken(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
+#endif
+	/* if a worker is waking up, notify workqueue */
+	if ((p->flags & PF_WQ_WORKER) && success)
+		wq_worker_waking_up(p, cpu_of(rq));
+}
+
+/**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2279,7 +2324,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
 			  int wake_flags)
@@ -2359,38 +2405,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-	schedstat_inc(p, se.statistics.nr_wakeups);
-	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.statistics.nr_wakeups_sync);
-	if (orig_cpu != cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-	if (cpu == this_cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_local);
-	else
-		schedstat_inc(p, se.statistics.nr_wakeups_remote);
-	activate_task(rq, p, en_flags);
+	ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+		      cpu == this_cpu, en_flags);
 	success = 1;
-
 out_running:
-	trace_sched_wakeup(p, success);
-	check_preempt_curr(rq, p, wake_flags);
-
-	p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-	if (p->sched_class->task_woken)
-		p->sched_class->task_woken(rq, p);
-
-	if (unlikely(rq->idle_stamp)) {
-		u64 delta = rq->clock - rq->idle_stamp;
-		u64 max = 2*sysctl_sched_migration_cost;
-
-		if (delta > max)
-			rq->avg_idle = max;
-		else
-			update_avg(&rq->avg_idle, delta);
-		rq->idle_stamp = 0;
-	}
-#endif
+	ttwu_post_activation(p, rq, wake_flags, success);
 out:
 	task_rq_unlock(rq, &flags);
 	put_cpu();
@@ -2398,6 +2417,37 @@ out:
 	return success;
 }
 
+/**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not already there. The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task. this_rq() stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+	bool success = false;
+
+	BUG_ON(rq != this_rq());
+	BUG_ON(p == current);
+	lockdep_assert_held(&rq->lock);
+
+	if (!(p->state & TASK_NORMAL))
+		return;
+
+	if (!p->se.on_rq) {
+		if (likely(!task_running(rq, p))) {
+			schedstat_inc(rq, ttwu_count);
+			schedstat_inc(rq, ttwu_local);
+		}
+		ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
+		success = true;
+	}
+	ttwu_post_activation(p, rq, 0, success);
+}
+
 /**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
@@ -3603,10 +3653,24 @@ need_resched_nonpreemptible:
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely(signal_pending_state(prev->state, prev)))
+		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
-		else
+		} else {
+			/*
+			 * If a worker is going to sleep, notify and
+			 * ask workqueue whether it wants to wake up a
+			 * task to maintain concurrency. If so, wake
+			 * up the task.
+			 */
+			if (prev->flags & PF_WQ_WORKER) {
+				struct task_struct *to_wakeup;
+
+				to_wakeup = wq_worker_sleeping(prev, cpu);
+				if (to_wakeup)
+					try_to_wake_up_local(to_wakeup);
+			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+		}
 		switch_count = &prev->nvcsw;
 	}
 
@@ -5801,20 +5865,49 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
-	.priority = 10
+	.priority = CPU_PRI_MIGRATION,
 };
 
+static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+				      unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		set_cpu_active((long)hcpu, true);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		set_cpu_active((long)hcpu, false);
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
 static int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	/* Start one for the boot CPU: */
+	/* Initialize migration for the boot CPU */
 	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
 	BUG_ON(err == NOTIFY_BAD);
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
 
+	/* Register cpu active notifiers */
+	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
 	return 0;
 }
 early_initcall(migration_init);
@@ -7273,29 +7366,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-#ifndef CONFIG_CPUSETS
 /*
- * Add online and remove offline CPUs from the scheduler domains.
- * When cpusets are enabled they take over this function.
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
  */
-static int update_sched_domains(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+static int __cpuexit cpuset_cpu_active(struct notifier_block *nfb,
+				       unsigned long action, void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		partition_sched_domains(1, NULL, NULL);
+		cpuset_update_active_cpus();
 		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
 
+static int __cpuexit cpuset_cpu_inactive(struct notifier_block *nfb,
+					 unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_PREPARE:
+		cpuset_update_active_cpus();
+		return NOTIFY_OK;
 	default:
 		return NOTIFY_DONE;
 	}
 }
-#endif
 
 static int update_runtime(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
@@ -7341,10 +7440,8 @@ void __init sched_init_smp(void)
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
-#ifndef CONFIG_CPUSETS
-	/* XXX: Theoretical race here - CPU may be hotplugged now */
-	hotcpu_notifier(update_sched_domains, 0);
-#endif
+	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
 	/* RT runtime code needs to handle some hotplug events */
 	hotcpu_notifier(update_runtime, 0);