@@ -759,6 +759,13 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 	return ret;
 }
 
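+/*
+ * Drop the threads_active reference taken for one threaded handler run
+ * and wake up the synchronize_irq() waiters on desc->wait_for_threads
+ * once the last thread has finished.
+ */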
+static void wake_threads_waitq(struct irq_desc *desc)
+{
+	if (atomic_dec_and_test(&desc->threads_active) &&
+	    waitqueue_active(&desc->wait_for_threads))
+		wake_up(&desc->wait_for_threads);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -771,57 +778,41 @@ static int irq_thread(void *data)
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 			struct irqaction *action);
-	int wake;
 
-	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 					&action->thread_flags))
 		handler_fn = irq_forced_thread_fn;
 	else
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irqaction = action;
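+	/*
+	 * Mark the task as an irq thread so exit_irq_thread() can detect
+	 * it; the irqaction itself is retrieved there via kthread_data().
+	 */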
+	current->irq_thread = 1;
 
 	while (!irq_wait_for_interrupt(action)) {
+		irqreturn_t action_ret;
 
 		irq_thread_check_affinity(desc, action);
 
-		atomic_inc(&desc->threads_active);
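+		/*
+		 * threads_active is now incremented by whoever wakes the
+		 * thread (where IRQTF_RUNTHREAD gets set), so just run
+		 * the handler and drop the reference afterwards.
+		 */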
+		action_ret = handler_fn(desc, action);
+		if (!noirqdebug)
+			note_interrupt(action->irq, desc, action_ret);
 
-		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
-			/*
-			 * CHECKME: We might need a dedicated
-			 * IRQ_THREAD_PENDING flag here, which
-			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQS_PENDING should be fine as it
-			 * retriggers the interrupt itself --- tglx
-			 */
-			desc->istate |= IRQS_PENDING;
-			raw_spin_unlock_irq(&desc->lock);
-		} else {
-			irqreturn_t action_ret;
-
-			raw_spin_unlock_irq(&desc->lock);
-			action_ret = handler_fn(desc, action);
-			if (!noirqdebug)
-				note_interrupt(action->irq, desc, action_ret);
-		}
-
-		wake = atomic_dec_and_test(&desc->threads_active);
-
-		if (wake && waitqueue_active(&desc->wait_for_threads))
-			wake_up(&desc->wait_for_threads);
+		wake_threads_waitq(desc);
 	}
 
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
-
 	/*
-	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * This is the regular exit path. __free_irq() is stopping the
+	 * thread via kthread_stop() after calling
+	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+	 * oneshot mask bit can be set. We cannot verify that as we
+	 * cannot touch the oneshot mask at this point anymore as
+	 * __setup_irq() might have given out current's thread_mask
+	 * again.
+	 *
+	 * Clear irq_thread. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irqaction = NULL;
+	current->irq_thread = 0;
 	return 0;
 }
 
@@ -832,27 +823,28 @@ void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
 	struct irq_desc *desc;
+	struct irqaction *action;
 
-	if (!tsk->irqaction)
+	if (!tsk->irq_thread)
 		return;
 
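+	/*
+	 * The irq thread is created with the irqaction as its kthread
+	 * data cookie, so kthread_data() recovers what the removed
+	 * tsk->irqaction pointer used to provide.
+	 */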
+	action = kthread_data(tsk);
+
 	printk(KERN_ERR
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
 
-	desc = irq_to_desc(tsk->irqaction->irq);
+	desc = irq_to_desc(action->irq);
 
 	/*
-	 * Prevent a stale desc->threads_oneshot. Must be called
-	 * before setting the IRQTF_DIED flag.
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
 	 */
-	irq_finalize_oneshot(desc, tsk->irqaction, true);
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
 
-	/*
-	 * Set the THREAD DIED flag to prevent further wakeups of the
-	 * soon to be gone threaded handler.
-	 */
-	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
 }
 
 static void irq_setup_forced_threading(struct irqaction *new)
@@ -1135,8 +1127,7 @@ out_thread:
 		struct task_struct *t = new->thread;
 
 		new->thread = NULL;
-		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
-			kthread_stop(t);
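+		/*
+		 * kthread_stop() copes with an already exited thread, and
+		 * the task reference taken at setup time keeps the
+		 * task_struct alive, so no IRQTF_DIED check is required.
+		 */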
+		kthread_stop(t);
 		put_task_struct(t);
 	}
 out_mput:
@@ -1246,8 +1237,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif
 
 	if (action->thread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(action->thread);
+		kthread_stop(action->thread);
 		put_task_struct(action->thread);
 	}
 