@@ -2406,20 +2406,43 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif

-static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
-                                 bool is_sync, bool is_migrate, bool is_local,
-                                 unsigned long en_flags)
+static void
+ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
 {
+#ifdef CONFIG_SCHEDSTATS
+#ifdef CONFIG_SMP
+        int this_cpu = smp_processor_id();
+
+        if (cpu == this_cpu) {
+                schedstat_inc(rq, ttwu_local);
+                schedstat_inc(p, se.statistics.nr_wakeups_local);
+        } else {
+                struct sched_domain *sd;
+
+                schedstat_inc(p, se.statistics.nr_wakeups_remote);
+                for_each_domain(this_cpu, sd) {
+                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
+                                schedstat_inc(sd, ttwu_wake_remote);
+                                break;
+                        }
+                }
+        }
+#endif /* CONFIG_SMP */
+
+        schedstat_inc(rq, ttwu_count);
         schedstat_inc(p, se.statistics.nr_wakeups);
-        if (is_sync)
+
+        if (wake_flags & WF_SYNC)
                 schedstat_inc(p, se.statistics.nr_wakeups_sync);
-        if (is_migrate)
+
+        if (cpu != task_cpu(p))
                 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-        if (is_local)
-                schedstat_inc(p, se.statistics.nr_wakeups_local);
-        else
-                schedstat_inc(p, se.statistics.nr_wakeups_remote);

+#endif /* CONFIG_SCHEDSTATS */
+}
+
+static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
+{
         activate_task(rq, p, en_flags);

         /* if a worker is waking up, notify workqueue */
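The hunk above splits the old boolean-heavy ttwu_activate() in two: ttwu_activate() now only enqueues the task, while every schedstat update moves into ttwu_stat(), which derives the old is_sync/is_migrate/is_local answers from (cpu, wake_flags) on its own. A minimal standalone sketch of that derivation follows; it is illustration only, not kernel code, and the WF_SYNC value, struct layout, and names are assumptions made for the example.

/*
 * Illustration only -- a userspace analogue of the pattern, not kernel code.
 * WF_SYNC's value and all names here are assumptions for the example.
 */
#include <stdio.h>

#define WF_SYNC 0x01    /* stand-in flag; the real value lives in the kernel */

struct wake_stats {
        unsigned long total, sync, migrate, local, remote;
};

/*
 * Analogue of ttwu_stat(): instead of the caller precomputing
 * is_sync/is_migrate/is_local, classify the wakeup from the raw inputs.
 * "cpu" is the CPU handed in by the caller, "task_cpu_now" stands in for
 * task_cpu(p) at accounting time, "this_cpu" for smp_processor_id().
 */
static void wake_stat(struct wake_stats *st, int this_cpu, int cpu,
                      int task_cpu_now, int wake_flags)
{
        st->total++;

        if (wake_flags & WF_SYNC)       /* was: is_sync */
                st->sync++;

        if (cpu != task_cpu_now)        /* was: is_migrate */
                st->migrate++;

        if (cpu == this_cpu)            /* was: is_local */
                st->local++;
        else
                st->remote++;
}

int main(void)
{
        struct wake_stats st = { 0 };

        wake_stat(&st, 0, 0, 0, 0);        /* local wakeup, same CPU */
        wake_stat(&st, 0, 1, 2, WF_SYNC);  /* remote, migrated, synchronous */

        printf("total=%lu sync=%lu migrate=%lu local=%lu remote=%lu\n",
               st.total, st.sync, st.migrate, st.local, st.remote);
        return 0;
}

The design point is that callers no longer precompute classification flags; the accounting helper is handed the raw inputs and can be invoked from any wakeup path, which the later hunks rely on.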
@@ -2481,12 +2504,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         if (!(p->state & state))
                 goto out;

+        cpu = task_cpu(p);
+
         if (p->se.on_rq)
                 goto out_running;

-        cpu = task_cpu(p);
         orig_cpu = cpu;
-
 #ifdef CONFIG_SMP
         if (unlikely(task_running(rq, p)))
                 goto out_activate;
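The hunk above hoists cpu = task_cpu(p) above the p->se.on_rq fast path. The reason shows up in the next hunk: ttwu_stat(rq, p, cpu, wake_flags) now sits on the shared out_running exit, so cpu must be initialized even when the wakeup takes the early goto. A standalone sketch of that control-flow pattern, with made-up names, purely for illustration:

/*
 * Illustration only -- not kernel code; names are made up.  Shows why an
 * assignment consumed on a shared exit path is hoisted above any early-out
 * that jumps straight to that exit.
 */
#include <stdio.h>

static int lookup_cpu(void)              /* stand-in for task_cpu(p) */
{
        return 3;
}

static int wake(int already_queued)
{
        int cpu;

        cpu = lookup_cpu();              /* hoisted: set on every path */

        if (already_queued)
                goto out_running;        /* fast path skips activation */

        /* slow path: activation work would happen here */

out_running:
        return cpu;                      /* shared exit consumes cpu */
}

int main(void)
{
        printf("fast path cpu=%d, slow path cpu=%d\n", wake(1), wake(0));
        return 0;
}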
@@ -2527,27 +2550,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         WARN_ON(task_cpu(p) != cpu);
         WARN_ON(p->state != TASK_WAKING);

-#ifdef CONFIG_SCHEDSTATS
-        schedstat_inc(rq, ttwu_count);
-        if (cpu == this_cpu)
-                schedstat_inc(rq, ttwu_local);
-        else {
-                struct sched_domain *sd;
-                for_each_domain(this_cpu, sd) {
-                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                                schedstat_inc(sd, ttwu_wake_remote);
-                                break;
-                        }
-                }
-        }
-#endif /* CONFIG_SCHEDSTATS */
-
 out_activate:
 #endif /* CONFIG_SMP */
-        ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
-                      cpu == this_cpu, en_flags);
+        ttwu_activate(rq, p, en_flags);
 out_running:
         ttwu_post_activation(p, rq, wake_flags);
+        ttwu_stat(rq, p, cpu, wake_flags);
         success = 1;
 out:
         task_rq_unlock(rq, &flags);
@@ -2575,14 +2583,11 @@ static void try_to_wake_up_local(struct task_struct *p)
         if (!(p->state & TASK_NORMAL))
                 return;

-        if (!p->se.on_rq) {
-                if (likely(!task_running(rq, p))) {
-                        schedstat_inc(rq, ttwu_count);
-                        schedstat_inc(rq, ttwu_local);
-                }
-                ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
-        }
+        if (!p->se.on_rq)
+                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+
         ttwu_post_activation(p, rq, 0);
+        ttwu_stat(rq, p, smp_processor_id(), 0);
 }

 /**