@@ -59,9 +59,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
  * from dequeue_task() to account for possible rq->clock skew across cpus. The
  * delta taken on each cpu would annul the skew.
  */
-static inline void sched_info_dequeued(struct task_struct *t)
+static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
 {
-	unsigned long long now = rq_clock(task_rq(t)), delta = 0;
+	unsigned long long now = rq_clock(rq), delta = 0;
 
 	if (unlikely(sched_info_on()))
 		if (t->sched_info.last_queued)
@@ -69,7 +69,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
 	sched_info_reset_dequeued(t);
 	t->sched_info.run_delay += delta;
 
-	rq_sched_info_dequeued(task_rq(t), delta);
+	rq_sched_info_dequeued(rq, delta);
 }
 
 /*
@@ -77,9 +77,9 @@ static inline void sched_info_dequeued(struct task_struct *t)
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static void sched_info_arrive(struct task_struct *t)
+static void sched_info_arrive(struct rq *rq, struct task_struct *t)
 {
-	unsigned long long now = rq_clock(task_rq(t)), delta = 0;
+	unsigned long long now = rq_clock(rq), delta = 0;
 
 	if (t->sched_info.last_queued)
 		delta = now - t->sched_info.last_queued;
@@ -88,7 +88,7 @@ static void sched_info_arrive(struct task_struct *t)
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcount++;
 
-	rq_sched_info_arrive(task_rq(t), delta);
+	rq_sched_info_arrive(rq, delta);
 }
 
 /*
@@ -96,11 +96,11 @@ static void sched_info_arrive(struct task_struct *t)
  * the timestamp if it is already not set. It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
  */
-static inline void sched_info_queued(struct task_struct *t)
+static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
 {
 	if (unlikely(sched_info_on()))
 		if (!t->sched_info.last_queued)
-			t->sched_info.last_queued = rq_clock(task_rq(t));
+			t->sched_info.last_queued = rq_clock(rq);
 }
 
 /*
@@ -111,15 +111,15 @@ static inline void sched_info_queued(struct task_struct *t)
  * sched_info_queued() to mark that it has now again started waiting on
  * the runqueue.
  */
-static inline void sched_info_depart(struct task_struct *t)
+static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
 {
-	unsigned long long delta = rq_clock(task_rq(t)) -
+	unsigned long long delta = rq_clock(rq) -
 			t->sched_info.last_arrival;
 
-	rq_sched_info_depart(task_rq(t), delta);
+	rq_sched_info_depart(rq, delta);
 
 	if (t->state == TASK_RUNNING)
-		sched_info_queued(t);
+		sched_info_queued(rq, t);
 }
 
 /*
@@ -128,32 +128,34 @@ static inline void sched_info_depart(struct task_struct *t)
  * the idle task.) We are only called when prev != next.
  */
 static inline void
-__sched_info_switch(struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct rq *rq,
+		    struct task_struct *prev, struct task_struct *next)
 {
-	struct rq *rq = task_rq(prev);
-
 	/*
 	 * prev now departs the cpu. It's not interesting to record
 	 * stats about how efficient we were at scheduling the idle
 	 * process, however.
 	 */
 	if (prev != rq->idle)
-		sched_info_depart(prev);
+		sched_info_depart(rq, prev);
 
 	if (next != rq->idle)
-		sched_info_arrive(next);
+		sched_info_arrive(rq, next);
 }
 static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
+sched_info_switch(struct rq *rq,
+		  struct task_struct *prev, struct task_struct *next)
 {
 	if (unlikely(sched_info_on()))
-		__sched_info_switch(prev, next);
+		__sched_info_switch(rq, prev, next);
 }
 #else
-#define sched_info_queued(t)		do { } while (0)
+#define sched_info_queued(rq, t)	do { } while (0)
 #define sched_info_reset_dequeued(t)	do { } while (0)
-#define sched_info_dequeued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
+#define sched_info_dequeued(rq, t)	do { } while (0)
+#define sched_info_depart(rq, t)	do { } while (0)
+#define sched_info_arrive(rq, next)	do { } while (0)
+#define sched_info_switch(rq, t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
 /*
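
For reference, a sketch of the matching caller-side change (not part of this
hunk): call sites such as enqueue_task()/dequeue_task() in kernel/sched/core.c
already hold the runqueue pointer, so they can pass rq through instead of
letting each helper re-derive it with task_rq(t), which costs a per-task
dereference on every enqueue/dequeue/switch. Assuming an enqueue path shaped
like the one these helpers are called from, a caller would now read:

	static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
	{
		update_rq_clock(rq);
		sched_info_queued(rq, p);	/* rq passed down; no task_rq(p) lookup */
		p->sched_class->enqueue_task(rq, p, flags);
	}

The dummy #define stubs above take the extra rq argument as well, so callers
compile unchanged when CONFIG_SCHEDSTATS and CONFIG_TASK_DELAY_ACCT are off.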