@@ -992,7 +992,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -1004,7 +1004,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1646,8 +1646,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -3857,8 +3856,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -3869,8 +3867,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
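
For reference, the helpers this patch switches to live in include/linux/sched.h. The snippet below is a sketch of roughly how they were defined at the time of this change; task_contributes_to_load() in particular has since grown additional conditions, so treat the exact expressions as illustrative rather than authoritative:

/* Sketch of the include/linux/sched.h definitions used above. */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/*
 * A task in uninterruptible sleep still counts toward the load average,
 * which is why activate_task()/deactivate_task() adjust
 * rq->nr_uninterruptible based on this predicate instead of comparing
 * p->state directly.
 */
#define task_contributes_to_load(task)	\
		((task->state & TASK_UNINTERRUPTIBLE) != 0)

With these names, each call site states its intent (wake any normal sleeper, wake everything, count a load-contributing task) instead of repeating the open-coded state masks.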