@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
+	raw_spin_lock(&irq_controller_lock);
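+	/*
+	 * gic_cpu_map[] is updated by gic_migrate_target() under this
+	 * lock, so it must be read with the lock held.
+	 */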
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
-
-	raw_spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
@@ -646,7 +645,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
-	unsigned long map = 0;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
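+	/*
+	 * Take the lock so the gic_cpu_map[] translation below and the
+	 * GIC_DIST_SOFTINT write cannot race with a concurrent
+	 * gic_migrate_target().
+	 */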
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -660,6 +661,86 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
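+	/* Bits 23:16 of GIC_DIST_SOFTINT hold the CPU target list. */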
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id. The CPU interface mapping
+ * is also updated. Interrupts targeting other CPU interfaces are left
+ * unchanged. This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
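+	/*
+	 * Each GIC_DIST_TARGET byte holds one interrupt's target mask,
+	 * one bit per CPU interface, so rotating a 32-bit word right by
+	 * (cur_cpu_id - new_cpu_id) bits moves every matching target bit
+	 * to the new interface's position within its byte.
+	 */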
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targeting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
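+		/*
+		 * Each 32-bit GIC_DIST_TARGET word covers four interrupts,
+		 * one target byte per interrupt.
+		 */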
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
+		}
+	}
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for SGIs previously sent by us to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
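+		/*
+		 * One byte per SGI (one pending bit per source CPU), so
+		 * each 32-bit word covers four of the 16 SGIs.
+		 */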
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
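+			/*
+			 * Re-raise SGI j at the new interface: bits 23:16
+			 * of GIC_DIST_SOFTINT take the CPU target list,
+			 * bits 3:0 the SGI number.
+			 */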
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+					       dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
 }
 #endif