@@ -37,6 +37,12 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 
        irq_chip_set_defaults(chip);
        desc->irq_data.chip = chip;
        irq_put_desc_unlock(desc, flags);
+       /*
+        * For !CONFIG_SPARSE_IRQ make the irq show up in
+        * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
+        * already marked, and this call is harmless.
+        */
+       irq_reserve_irq(irq);
        return 0;
 }
 EXPORT_SYMBOL(irq_set_chip);
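For !CONFIG_SPARSE_IRQ the reservation above is what makes the irq visible in the
allocated_irqs bitmap that for_each_active_irq() and friends walk; with
CONFIG_SPARSE_IRQ the bit was already set when the descriptor was allocated, so
the call changes nothing. A minimal sketch of what the reservation amounts to
(the helper name is hypothetical; allocated_irqs and nr_irqs are the globals
from kernel/irq/irqdesc.c):

    /* Hypothetical sketch, not the real irq_reserve_irq(): mark the
     * irq as occupied in the global bitmap so the irq iterators see
     * it. Setting an already-set bit is a no-op, which is why the
     * call is harmless in the CONFIG_SPARSE_IRQ case. */
    static int reserve_irq_sketch(unsigned int irq)
    {
            if (irq >= nr_irqs)
                    return -EINVAL;
            bitmap_set(allocated_irqs, irq, 1);
            return 0;
    }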
@@ -134,25 +140,25 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data);
 
 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
-       desc->istate &= ~IRQS_DISABLED;
+       irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
        irq_compat_clr_disabled(desc);
 }
 
 static void irq_state_set_disabled(struct irq_desc *desc)
 {
-       desc->istate |= IRQS_DISABLED;
+       irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        irq_compat_set_disabled(desc);
 }
 
 static void irq_state_clr_masked(struct irq_desc *desc)
 {
-       desc->istate &= ~IRQS_MASKED;
+       irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
        irq_compat_clr_masked(desc);
 }
 
 static void irq_state_set_masked(struct irq_desc *desc)
 {
-       desc->istate |= IRQS_MASKED;
+       irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
        irq_compat_set_masked(desc);
 }
 
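The four conversions above are the pattern for the whole patch: the
DISABLED/MASKED/INPROGRESS bits move out of desc->istate into the per-irq
irq_data state word, and all access goes through small accessors. Roughly (a
sketch; the field name state_use_accessors is assumed from the struct irq_data
of this kernel generation, see include/linux/irq.h for the real definitions):

    /* Sketch of the irqd_* accessors: plain bit operations on the
     * state word embedded in struct irq_data. */
    static inline void irqd_set(struct irq_data *d, unsigned int mask)
    {
            d->state_use_accessors |= mask;
    }

    static inline void irqd_clear(struct irq_data *d, unsigned int mask)
    {
            d->state_use_accessors &= ~mask;
    }

    static inline bool irqd_irq_disabled(struct irq_data *d)
    {
            return d->state_use_accessors & IRQD_IRQ_DISABLED;
    }

The point of the move is that irq chip implementations only ever see a struct
irq_data pointer, so they can now query this state without reaching back into
the irq_desc internals.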
@@ -372,11 +378,11 @@ void handle_nested_irq(unsigned int irq)
        kstat_incr_irqs_this_cpu(irq, desc);
 
        action = desc->action;
-       if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
+       if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
                goto out_unlock;
 
        irq_compat_set_progress(desc);
-       desc->istate |= IRQS_INPROGRESS;
+       irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);
 
        action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -384,7 +390,7 @@ void handle_nested_irq(unsigned int irq)
        note_interrupt(irq, desc, action_ret);
 
        raw_spin_lock_irq(&desc->lock);
-       desc->istate &= ~IRQS_INPROGRESS;
+       irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        irq_compat_clr_progress(desc);
 
 out_unlock:
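handle_nested_irq() runs from the threaded handler of a demultiplexing parent
interrupt, which is why it may drop the descriptor lock and call thread_fn
directly rather than waking a handler thread. A hypothetical caller, to place
the function (the expander_* names are invented for illustration):

    /* Hypothetical demux parent: an I2C GPIO expander's threaded
     * handler dispatching the nested child interrupts it found
     * pending. expander_read_status() is an assumed helper. */
    static irqreturn_t expander_irq_thread(int irq, void *data)
    {
            struct expander *exp = data;
            unsigned long pending = expander_read_status(exp);
            int bit;

            for_each_set_bit(bit, &pending, exp->nr_child_irqs)
                    handle_nested_irq(exp->irq_base + bit);

            return IRQ_HANDLED;
    }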
@@ -416,14 +422,14 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
        raw_spin_lock(&desc->lock);
 
-       if (unlikely(desc->istate & IRQS_INPROGRESS))
+       if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out_unlock;
 
        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);
 
-       if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
                goto out_unlock;
 
        handle_irq_event(desc);
@@ -448,7 +454,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);
 
-       if (unlikely(desc->istate & IRQS_INPROGRESS))
+       if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out_unlock;
 
@@ -459,12 +465,12 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
         * If its disabled or no action available
         * keep it masked and get out of here
         */
-       if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
                goto out_unlock;
 
        handle_irq_event(desc);
 
-       if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
+       if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
                unmask_irq(desc);
 out_unlock:
        raw_spin_unlock(&desc->lock);
@@ -496,7 +502,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
        raw_spin_lock(&desc->lock);
 
-       if (unlikely(desc->istate & IRQS_INPROGRESS))
+       if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out;
 
@@ -507,7 +513,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
         * If its disabled or no action available
         * then mask it and get out of here:
         */
-       if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
+       if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                irq_compat_set_pending(desc);
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
@@ -558,8 +564,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
-       if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
-                     !desc->action))) {
+       if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+                    irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                if (!irq_check_poll(desc)) {
                        irq_compat_set_pending(desc);
                        desc->istate |= IRQS_PENDING;
@@ -584,20 +590,65 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
         * Renable it, if it was not disabled in meantime.
         */
        if (unlikely(desc->istate & IRQS_PENDING)) {
-               if (!(desc->istate & IRQS_DISABLED) &&
-                   (desc->istate & IRQS_MASKED))
+               if (!irqd_irq_disabled(&desc->irq_data) &&
+                   irqd_irq_masked(&desc->irq_data))
                        unmask_irq(desc);
        }
 
        handle_irq_event(desc);
 
 } while ((desc->istate & IRQS_PENDING) &&
-        !(desc->istate & IRQS_DISABLED));
+        !irqd_irq_disabled(&desc->irq_data));
 
 out_unlock:
        raw_spin_unlock(&desc->lock);
 }
 
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ * handle_edge_eoi_irq - edge eoi type IRQ handler
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Similar as the above handle_edge_irq, but using eoi and w/o the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+
+       raw_spin_lock(&desc->lock);
+
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+       /*
+        * If we're currently running this IRQ, or its disabled,
+        * we shouldn't process the IRQ. Mark it pending, handle
+        * the necessary masking and go out
+        */
+       if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+                    irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+               if (!irq_check_poll(desc)) {
+                       desc->istate |= IRQS_PENDING;
+                       goto out_eoi;
+               }
+       }
+       kstat_incr_irqs_this_cpu(irq, desc);
+
+       do {
+               if (unlikely(!desc->action))
+                       goto out_eoi;
+
+               handle_irq_event(desc);
+
+       } while ((desc->istate & IRQS_PENDING) &&
+                !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+       chip->irq_eoi(&desc->irq_data);
+       raw_spin_unlock(&desc->lock);
+}
+#endif
+
 /**
  * handle_percpu_irq - Per CPU local irq handler
  * @irq: the interrupt number
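The handle_edge_eoi_irq() handler added above is selected like any other flow
handler; it fits controllers whose edge interrupts must be acknowledged with an
EOI but which offer no per-line mask/unmask. A wiring sketch (my_eoi_chip and
my_chip_eoi are assumptions, not part of this patch):

    /* Hypothetical setup, assuming CONFIG_IRQ_EDGE_EOI_HANDLER is
     * selected and the chip implements .irq_eoi but no mask ops. */
    static void my_chip_eoi(struct irq_data *d)
    {
            /* write the controller's EOI register for d->irq */
    }

    static struct irq_chip my_eoi_chip = {
            .name           = "my-eoi",
            .irq_eoi        = my_chip_eoi,
    };

    static void my_controller_map_irq(unsigned int virq)
    {
            irq_set_chip_and_handler(virq, &my_eoi_chip, handle_edge_eoi_irq);
    }

Note that the handler's loop keeps running while IRQS_PENDING is set, so an edge
arriving mid-handling is replayed without ever masking the line; only the final
EOI is issued, at the out_eoi label.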
@@ -642,8 +693,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
-               irq_compat_set_disabled(desc);
-               desc->istate |= IRQS_DISABLED;
+               irq_state_set_disabled(desc);
                desc->depth = 1;
        }
        desc->handle_irq = handle;
@@ -684,8 +734,70 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
                irqd_set(&desc->irq_data, IRQD_PER_CPU);
        if (irq_settings_can_move_pcntxt(desc))
                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
+       if (irq_settings_is_level(desc))
+               irqd_set(&desc->irq_data, IRQD_LEVEL);
 
        irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
 
        irq_put_desc_unlock(desc, flags);
 }
+
+/**
+ * irq_cpu_online - Invoke all irq_cpu_online functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_online()
+ * for each.
+ */
+void irq_cpu_online(void)
+{
+       struct irq_desc *desc;
+       struct irq_chip *chip;
+       unsigned long flags;
+       unsigned int irq;
+
+       for_each_active_irq(irq) {
+               desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
+
+               raw_spin_lock_irqsave(&desc->lock, flags);
+
+               chip = irq_data_get_irq_chip(&desc->irq_data);
+               if (chip && chip->irq_cpu_online &&
+                   (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+                    !irqd_irq_disabled(&desc->irq_data)))
+                       chip->irq_cpu_online(&desc->irq_data);
+
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
+       }
+}
+
+/**
+ * irq_cpu_offline - Invoke all irq_cpu_offline functions.
+ *
+ * Iterate through all irqs and invoke the chip.irq_cpu_offline()
+ * for each.
+ */
+void irq_cpu_offline(void)
+{
+       struct irq_desc *desc;
+       struct irq_chip *chip;
+       unsigned long flags;
+       unsigned int irq;
+
+       for_each_active_irq(irq) {
+               desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
+
+               raw_spin_lock_irqsave(&desc->lock, flags);
+
+               chip = irq_data_get_irq_chip(&desc->irq_data);
+               if (chip && chip->irq_cpu_offline &&
+                   (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
+                    !irqd_irq_disabled(&desc->irq_data)))
+                       chip->irq_cpu_offline(&desc->irq_data);
+
+               raw_spin_unlock_irqrestore(&desc->lock, flags);
+       }
+}
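Both walkers take each descriptor's lock and honour IRQCHIP_ONOFFLINE_ENABLED,
which restricts the callback to interrupts that are not disabled. A chip opts in
simply by filling in the callbacks; the architecture's hotplug path then runs
irq_cpu_online() on the incoming CPU and irq_cpu_offline() on a CPU about to go
down. A hedged sketch (all my_* names are illustrative):

    /* Hypothetical chip that re-routes its interrupts when CPUs come
     * and go. With IRQCHIP_ONOFFLINE_ENABLED set, the core invokes
     * the callbacks only for irqs that are not disabled. */
    static void my_chip_cpu_online(struct irq_data *d)
    {
            /* re-target the route for d->irq at the current CPU */
    }

    static void my_chip_cpu_offline(struct irq_data *d)
    {
            /* move the route for d->irq away from the dying CPU */
    }

    static struct irq_chip my_chip = {
            .name            = "my-chip",
            .irq_cpu_online  = my_chip_cpu_online,
            .irq_cpu_offline = my_chip_cpu_offline,
            .flags           = IRQCHIP_ONOFFLINE_ENABLED,
    };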