@@ -617,8 +617,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+				 struct irqaction *action, bool force)
 {
+	if (!(desc->istate & IRQS_ONESHOT))
+		return;
 again:
 	chip_bus_lock(desc);
 	raw_spin_lock_irq(&desc->lock);
@@ -631,6 +634,11 @@ again:
 	 * on the other CPU. If we unmask the irq line then the
 	 * interrupt can come in again and masks the line, leaves due
 	 * to IRQS_INPROGRESS and the irq line is masked forever.
+	 *
+	 * This also serializes the state of shared oneshot handlers
+	 * versus "desc->threads_oneshot |= action->thread_mask;" in
+	 * irq_wake_thread(). See the comment there which explains the
+	 * serialization.
 	 */
 	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
 		raw_spin_unlock_irq(&desc->lock);
@@ -639,11 +647,23 @@ again:
 		goto again;
 	}
 
-	if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
+	/*
+	 * Now check again, whether the thread should run. Otherwise
+	 * we would clear the threads_oneshot bit of this thread which
+	 * was just set.
+	 */
+	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		goto out_unlock;
+
+	desc->threads_oneshot &= ~action->thread_mask;
+
+	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+	    (desc->istate & IRQS_MASKED)) {
 		irq_compat_clr_masked(desc);
 		desc->istate &= ~IRQS_MASKED;
 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	}
+out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(desc);
 }
@@ -691,7 +711,7 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	int wake, oneshot = desc->istate & IRQS_ONESHOT;
+	int wake;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
@@ -719,8 +739,7 @@ static int irq_thread(void *data)
 
 			action->thread_fn(action->irq, action->dev_id);
 
-			if (oneshot)
-				irq_finalize_oneshot(action->irq, desc);
+			irq_finalize_oneshot(desc, action, false);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -729,6 +748,9 @@ static int irq_thread(void *data)
 			wake_up(&desc->wait_for_threads);
 	}
 
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
+
 	/*
 	 * Clear irqaction. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
@@ -743,6 +765,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
+	struct irq_desc *desc;
 
 	if (!tsk->irqaction)
 		return;
@@ -751,6 +774,14 @@ void exit_irq_thread(void)
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+	desc = irq_to_desc(tsk->irqaction->irq);
+
+	/*
+	 * Prevent a stale desc->threads_oneshot. Must be called
+	 * before setting the IRQTF_DIED flag.
+	 */
+	irq_finalize_oneshot(desc, tsk->irqaction, true);
+
 	/*
 	 * Set the THREAD DIED flag to prevent further wakeups of the
 	 * soon to be gone threaded handler.
@@ -767,7 +798,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
-	unsigned long flags;
+	unsigned long flags, thread_mask = 0;
 	int ret, nested, shared = 0;
 	cpumask_var_t mask;
 
@@ -865,12 +896,23 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
+			thread_mask |= old->thread_mask;
 			old_ptr = &old->next;
 			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
 
+	/*
+	 * Setup the thread mask for this irqaction. Unlikely to have
+	 * 32 resp 64 irqs sharing one line, but who knows.
+	 */
+	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+		ret = -EBUSY;
+		goto out_mask;
+	}
+	new->thread_mask = 1 << ffz(thread_mask);
+
 	if (!shared) {
 		irq_chip_set_defaults(desc->irq_data.chip);
 
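
For illustration only, not part of the patch above: a minimal userspace sketch of the thread_mask
allocation scheme that __setup_irq() uses after this change. Each shared action is assigned the
lowest bit not yet taken in the accumulated thread_mask, and setup fails once every bit is in use
(the -EBUSY case in the hunk above). The helper ffz_approx() and the use of __builtin_ctzl() are
stand-ins for the kernel's ffz(); they are not kernel APIs.

#include <stdio.h>

/* Userspace stand-in for the kernel's ffz(): index of the lowest zero bit. */
static unsigned long ffz_approx(unsigned long mask)
{
	return __builtin_ctzl(~mask);
}

int main(void)
{
	unsigned long thread_mask = 0;	/* bits already taken by the shared actions on this line */

	/* Allocate a distinct bit for each of three oneshot actions sharing one irq line. */
	for (int i = 0; i < 3; i++) {
		if (thread_mask == ~0UL) {
			/* All bits used: this is where __setup_irq() would bail out with -EBUSY. */
			return 1;
		}
		unsigned long bit = 1UL << ffz_approx(thread_mask);
		thread_mask |= bit;
		printf("action %d gets thread_mask 0x%lx\n", i, bit);
	}
	return 0;
}

Running it prints 0x1, 0x2 and 0x4 for the three actions; desc->threads_oneshot can then track
which threaded handlers still hold the line masked by setting and clearing exactly these bits.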