@@ -508,10 +508,15 @@ EXPORT_SYMBOL_GPL(cpu_clock);
 # define finish_arch_switch(prev) do { } while (0)
 #endif
 
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+	return rq->curr == p;
+}
+
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
-	return rq->curr == p;
+	return task_current(rq, p);
 }
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
@@ -540,7 +545,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifdef CONFIG_SMP
 	return p->oncpu;
 #else
-	return rq->curr == p;
+	return task_current(rq, p);
 #endif
 }
 
@@ -663,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	struct rq *rq = cpu_rq(smp_processor_id());
 	u64 now = sched_clock();
 
+	touch_softlockup_watchdog();
 	rq->idle_clock += delta_ns;
 	/*
 	 * Override the previous timestamp and ignore all
@@ -3334,7 +3340,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime;
-	if (rq->curr == p) {
+	if (task_current(rq, p)) {
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
@@ -4021,7 +4027,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
+	running = task_current(rq, p);
 	if (on_rq) {
 		dequeue_task(rq, p, 0);
 		if (running)
@@ -4332,7 +4338,7 @@ recheck:
 	}
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
-	running = task_running(rq, p);
+	running = task_current(rq, p);
 	if (on_rq) {
 		deactivate_task(rq, p, 0);
 		if (running)
@@ -7101,7 +7107,7 @@ void sched_move_task(struct task_struct *tsk)
 
 	update_rq_clock(rq);
 
-	running = task_running(rq, tsk);
+	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
 	if (on_rq) {
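
For context on the split this patch draws: task_current() asks "does the runqueue consider p its current task" (rq->curr == p), while task_running() asks "may p still be executing on a CPU", which on an __ARCH_WANT_UNLOCKED_CTXSW architecture is tracked separately via p->oncpu, since a context switch can complete after rq->curr has already been updated. The standalone sketch below mirrors that distinction with simplified stand-in types (rq_sim, task_sim and the sim_* helpers are made-up names for illustration, not kernel code); it is a userspace sketch of the helpers' semantics only.

```c
/*
 * Standalone illustration (userspace, simplified types) of the split the
 * patch relies on: "current on this runqueue" vs. "still running on a CPU".
 * Struct names and the main() scenario are hypothetical; only the two
 * predicates mirror the patched helpers.
 */
#include <stdio.h>
#include <stdbool.h>

struct task_sim {
	const char *comm;	/* task name */
	int oncpu;		/* 1 while the task still occupies a CPU */
};

struct rq_sim {
	struct task_sim *curr;	/* task this runqueue considers current */
};

/* Mirrors task_current(): purely a runqueue-ownership question. */
static inline bool sim_task_current(struct rq_sim *rq, struct task_sim *p)
{
	return rq->curr == p;
}

/*
 * Mirrors task_running() on an __ARCH_WANT_UNLOCKED_CTXSW arch: the task
 * may already have lost rq->curr while it is still physically on its old
 * CPU, so ->oncpu is what counts there.
 */
static inline bool sim_task_running(struct rq_sim *rq, struct task_sim *p)
{
	(void)rq;
	return p->oncpu;
}

int main(void)
{
	struct task_sim prev = { .comm = "prev", .oncpu = 1 };
	struct task_sim next = { .comm = "next", .oncpu = 0 };
	struct rq_sim rq = { .curr = &prev };

	/* Mid-switch window: rq->curr already points at next, prev not yet off CPU. */
	rq.curr = &next;
	next.oncpu = 1;

	printf("prev: current=%d running=%d\n",
	       sim_task_current(&rq, &prev), sim_task_running(&rq, &prev));
	printf("next: current=%d running=%d\n",
	       sim_task_current(&rq, &next), sim_task_running(&rq, &next));
	return 0;
}
```

That difference is presumably why the rt_mutex_setprio(), sched_setscheduler() and sched_move_task() hunks move from task_running() to task_current(): for the dequeue/re-enqueue bookkeeping they only need to know whether p is the runqueue's current task, not whether its context switch has physically finished.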