|
@@ -34,6 +34,7 @@
|
|
|
#define ARMADA_370_XP_INT_CONTROL (0x00)
|
|
|
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
|
|
|
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
|
|
|
+/* Per-source CPU routing register: one 32-bit register per interrupt.
+ * The argument must be parenthesized so an expression such as
+ * ARMADA_370_XP_INT_SOURCE_CTL(base + n) expands correctly.
+ */
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + (irq) * 4)
|
|
|
|
|
|
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
|
|
|
|
|
@@ -41,28 +42,90 @@
|
|
|
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
|
|
|
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
|
|
|
|
|
|
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
|
|
|
+
|
|
|
#define ACTIVE_DOORBELLS (8)
|
|
|
|
|
|
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
|
|
|
+
|
|
|
static void __iomem *per_cpu_int_base;
|
|
|
static void __iomem *main_int_base;
|
|
|
static struct irq_domain *armada_370_xp_mpic_domain;
|
|
|
|
|
|
+/*
|
|
|
+ * In SMP mode:
|
|
|
+ * For shared global interrupts, mask/unmask global enable bit
|
|
|
+ * For per-CPU interrupts, mask/unmask the calling CPU's bit
|
|
|
+ */
|
|
|
static void armada_370_xp_irq_mask(struct irq_data *d)
|
|
|
{
|
|
|
+#ifdef CONFIG_SMP
|
|
|
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
|
|
+
|
|
|
+ /*
+  * NOTE(review): hwirq values up to and including
+  * ARMADA_370_XP_MAX_PER_CPU_IRQS (28) take the per-CPU path here;
+  * confirm against the MPIC datasheet whether the test should be >=.
+  */
+ if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
|
|
|
+ writel(hwirq, main_int_base +
|
|
|
+ ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
|
|
|
+ else
|
|
|
+ writel(hwirq, per_cpu_int_base +
|
|
|
+ ARMADA_370_XP_INT_SET_MASK_OFFS);
|
|
|
+#else
|
|
|
writel(irqd_to_hwirq(d),
|
|
|
per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
|
|
|
+#endif
|
|
|
}
|
|
|
|
|
|
+/*
+ * Unmask counterpart of armada_370_xp_irq_mask.
+ * In SMP mode:
+ * For shared global interrupts, set the global enable bit
+ * For per-CPU interrupts, clear the calling CPU's mask bit
+ */
static void armada_370_xp_irq_unmask(struct irq_data *d)
|
|
|
{
|
|
|
+#ifdef CONFIG_SMP
|
|
|
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
|
|
+
|
|
|
+ if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
|
|
|
+ writel(hwirq, main_int_base +
|
|
|
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
|
|
+ else
|
|
|
+ writel(hwirq, per_cpu_int_base +
|
|
|
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
|
|
+#else
|
|
|
writel(irqd_to_hwirq(d),
|
|
|
per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
|
|
+#endif
|
|
|
}
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
+/*
+ * Route a global interrupt to exactly one CPU by rewriting its
+ * per-source CPU-routing register.  Multi-CPU affinity masks are
+ * rejected (-EINVAL).  Returns 0 on success.
+ */
static int armada_xp_set_affinity(struct irq_data *d,
|
|
|
const struct cpumask *mask_val, bool force)
|
|
|
{
|
|
|
+ unsigned long reg;
|
|
|
+ unsigned long new_mask = 0;
|
|
|
+ unsigned long online_mask = 0;
|
|
|
+ unsigned long count = 0;
|
|
|
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
|
|
+ int cpu;
|
|
|
+
|
|
|
+ /* Translate the logical cpumask into a physical-CPU bit mask. */
+ for_each_cpu(cpu, mask_val) {
|
|
|
+ new_mask |= 1 << cpu_logical_map(cpu);
|
|
|
+ count++;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Forbid multicore interrupt affinity
|
|
|
+ * This is required since the MPIC HW doesn't limit
|
|
|
+ * several CPUs from acknowledging the same interrupt.
|
|
|
+ */
|
|
|
+ if (count > 1)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ for_each_cpu(cpu, cpu_online_mask)
|
|
|
+ online_mask |= 1 << cpu_logical_map(cpu);
|
|
|
+
|
|
|
+ raw_spin_lock(&irq_controller_lock);
|
|
|
+
|
|
|
+ /* Read-modify-write: clear the routing bits of all online CPUs,
+  * then set the single new target bit.  Bits for offline/foreign
+  * CPUs are intentionally left untouched.
+  */
+ reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
|
|
|
+ reg = (reg & (~online_mask)) | new_mask;
|
|
|
+ writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
|
|
|
+
|
|
|
+ raw_spin_unlock(&irq_controller_lock);
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
#endif
|
|
@@ -155,6 +218,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
armada_xp_mpic_smp_cpu_init();
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Set the default affinity from all CPUs to the boot cpu.
|
|
|
+ * This is required since the MPIC doesn't limit several CPUs
|
|
|
+ * from acknowledging the same interrupt.
|
|
|
+ */
|
|
|
+ cpumask_clear(irq_default_affinity);
|
|
|
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
|
|
|
+
|
|
|
#endif
|
|
|
|
|
|
return 0;
|