@@ -2267,11 +2267,52 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
-/***
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+				 bool is_sync, bool is_migrate, bool is_local,
+				 unsigned long en_flags)
+{
+	schedstat_inc(p, se.statistics.nr_wakeups);
+	if (is_sync)
+		schedstat_inc(p, se.statistics.nr_wakeups_sync);
+	if (is_migrate)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+	if (is_local)
+		schedstat_inc(p, se.statistics.nr_wakeups_local);
+	else
+		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+	activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+					int wake_flags, bool success)
+{
+	trace_sched_wakeup(p, success);
+	check_preempt_curr(rq, p, wake_flags);
+
+	p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+	if (p->sched_class->task_woken)
+		p->sched_class->task_woken(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
+#endif
+}
+
+/**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2279,7 +2320,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
 			  int wake_flags)
@@ -2359,38 +2401,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-	schedstat_inc(p, se.statistics.nr_wakeups);
-	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.statistics.nr_wakeups_sync);
-	if (orig_cpu != cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-	if (cpu == this_cpu)
-		schedstat_inc(p, se.statistics.nr_wakeups_local);
-	else
-		schedstat_inc(p, se.statistics.nr_wakeups_remote);
-	activate_task(rq, p, en_flags);
+	ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+		      cpu == this_cpu, en_flags);
 	success = 1;
-
 out_running:
-	trace_sched_wakeup(p, success);
-	check_preempt_curr(rq, p, wake_flags);
-
-	p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-	if (p->sched_class->task_woken)
-		p->sched_class->task_woken(rq, p);
-
-	if (unlikely(rq->idle_stamp)) {
-		u64 delta = rq->clock - rq->idle_stamp;
-		u64 max = 2*sysctl_sched_migration_cost;
-
-		if (delta > max)
-			rq->avg_idle = max;
-		else
-			update_avg(&rq->avg_idle, delta);
-		rq->idle_stamp = 0;
-	}
-#endif
+	ttwu_post_activation(p, rq, wake_flags, success);
 out:
 	task_rq_unlock(rq, &flags);
 	put_cpu();
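
For context, the rq->avg_idle bookkeeping that moves verbatim into
ttwu_post_activation() is an exponential moving average of observed idle
periods, clamped so a single long sleep cannot inflate the estimate (idle
balancing compares avg_idle against the migration cost to decide whether a
newly idle CPU should bother pulling work). Below is a minimal userspace
sketch of that arithmetic, not the kernel code itself: it assumes
update_avg() keeps its one-eighth weighting (*avg += (sample - *avg) / 8),
and the 500us migration cost and the sample deltas are made-up values.

#include <stdio.h>
#include <stdint.h>

/* One-eighth EWMA weighting, mirroring update_avg() in the hunk header. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	/* Arithmetic shift, as the kernel does with s64. */
	*avg += diff >> 3;
}

int main(void)
{
	uint64_t avg_idle = 0;
	uint64_t max = 2 * 500000ULL;	/* 2 * hypothetical 500us migration cost */
	uint64_t deltas[] = { 100000, 300000, 2000000, 50000 };	/* idle periods, ns */
	unsigned int i;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		if (deltas[i] > max)
			avg_idle = max;	/* clamp outliers instead of averaging them in */
		else
			update_avg(&avg_idle, deltas[i]);
		printf("delta=%7llu -> avg_idle=%llu\n",
		       (unsigned long long)deltas[i],
		       (unsigned long long)avg_idle);
	}
	return 0;
}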