@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_INTERRUPTIBLE);
+	__set_current_state(TASK_PARKED);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
 		schedule();
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_PARKED);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, state)) {
+		WARN_ON(1);
+		return;
+	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	__kthread_bind(p, cpu);
+	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
 	return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+	/*
+	 * We clear the IS_PARKED bit here as we don't wait
+	 * until the task has left the park code. So if we'd
+	 * park before that happens we'd see the IS_PARKED bit
+	 * which might be about to be cleared.
+	 */
+	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+			__kthread_bind(k, kthread->cpu, TASK_PARKED);
+		wake_up_state(k, TASK_PARKED);
+	}
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k: thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = task_get_live_kthread(k);
 
-	if (kthread) {
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-		/*
-		 * We clear the IS_PARKED bit here as we don't wait
-		 * until the task has left the park code. So if we'd
-		 * park before that happens we'd see the IS_PARKED bit
-		 * which might be about to be cleared.
-		 */
-		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-				__kthread_bind(k, kthread->cpu);
-			wake_up_process(k);
-		}
-	}
+	if (kthread)
+		__kthread_unpark(k, kthread);
 	put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
 	trace_sched_kthread_stop(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
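
For context, a minimal sketch (not part of the patch above) of how a parkable per-CPU kthread's main loop typically cooperates with this park/unpark machinery, assuming the standard kthread_should_stop()/kthread_should_park()/kthread_parkme() helpers from <linux/kthread.h>; the function name and the work done inside the loop are hypothetical.

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical per-CPU worker loop; illustrates park/unpark cooperation. */
static int example_percpu_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleeps in TASK_PARKED until kthread_unpark() wakes us. */
			kthread_parkme();
			continue;
		}
		/* ... do the per-CPU work here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}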