@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <asm/io.h>
 
 #include <asm/smp.h>
 
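(The new asm/io.h include is what provides gsc_writel(), which the rethrow path added to do_cpu_irq_mask() further down relies on; without it the SMP build would trip over an implicit declaration.)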
@@ -84,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+#ifdef CONFIG_SMP
+int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+{
+	int cpu_dest;
+
+	/* timer and ipi have to always be received on all CPUs */
+	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+		/* Bad linux design decision.  The mask has already
+		 * been set; we must reset it */
+		irq_affinity[irq] = CPU_MASK_ALL;
+		return -EINVAL;
+	}
+
+	/* whatever mask they set, we just allow one CPU */
+	cpu_dest = first_cpu(*dest);
+	*dest = cpumask_of_cpu(cpu_dest);
+
+	return 0;
+}
+
+static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+{
+	if (cpu_check_affinity(irq, &dest))
+		return;
+
+	irq_affinity[irq] = dest;
+}
+#endif
+
 static struct hw_interrupt_type cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
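cpu_check_affinity() is the whole affinity policy in one place: a per-device external interrupt gets clamped to exactly one CPU (a CPU EIR bit cannot be fanned out in hardware), while the timer and IPI must keep CPU_MASK_ALL and reject any narrower mask. The stand-alone sketch below models that clamping behaviour; it is illustrative only, with cpumask_t reduced to an unsigned long and TIMER_IRQ/IPI_IRQ given placeholder values, not the kernel API:

/* Stand-alone model of the clamp-to-one-CPU policy; not kernel code. */
#include <stdio.h>

#define TIMER_IRQ	0		/* placeholder numbering */
#define IPI_IRQ		1
#define CPU_MASK_ALL	(~0UL)

static unsigned long irq_affinity[64];	/* one word stands in for cpumask_t */

static int first_cpu(unsigned long mask)	/* lowest set bit, as in the kernel helper */
{
	int cpu = 0;

	while (cpu < 64 && !(mask & (1UL << cpu)))
		cpu++;
	return cpu;
}

static int check_affinity(unsigned int irq, unsigned long *dest)
{
	/* timer and ipi must stay on all CPUs: undo the caller's change */
	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
		irq_affinity[irq] = CPU_MASK_ALL;
		return -1;
	}
	/* whatever mask was asked for, keep only the first CPU in it */
	*dest = 1UL << first_cpu(*dest);
	return 0;
}

int main(void)
{
	unsigned long mask = 0xcUL;	/* request CPUs 2 and 3 */

	if (!check_affinity(5, &mask))
		irq_affinity[5] = mask;
	printf("irq 5 clamped to %#lx\n", mask);	/* prints 0x4: CPU 2 only */
	return 0;
}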
@@ -92,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.disable	= cpu_disable_irq,
 	.ack		= no_ack_irq,
 	.end		= no_end_irq,
-//	.set_affinity	= cpu_set_affinity_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	= cpu_set_affinity_irq,
+#endif
 };
 
 int show_interrupts(struct seq_file *p, void *v)
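With the handler wired up, the generic IRQ layer calls cpu_set_affinity_irq() whenever a new mask is written to /proc/irq/<N>/smp_affinity; the FIXME in do_cpu_irq_mask() below exists because that generic code can adjust the mask before the architecture sees it. The commented-out stub this replaces suggests the hookup was planned all along and only blocked on the function existing.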
@@ -229,6 +261,13 @@ int txn_alloc_irq(unsigned int bits_wide)
 	return -1;
 }
 
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+	irq_affinity[irq] = cpumask_of_cpu(cpu);
+
+	return cpu_data[cpu].txn_addr;
+}
+
 unsigned long txn_alloc_addr(unsigned int virt_irq)
 {
 	static int next_cpu = -1;
@@ -243,7 +282,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
 	if (next_cpu >= NR_CPUS)
 		next_cpu = 0;	/* nothing else, assign monarch */
 
-	return cpu_data[next_cpu].txn_addr;
+	return txn_affinity_addr(virt_irq, next_cpu);
 }
 
 
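These two hunks keep the bookkeeping consistent: txn_affinity_addr() records in irq_affinity[] which CPU a transaction address targets at the moment it is handed out, so the round-robin allocation in txn_alloc_addr() now leaves each new interrupt with a single-CPU affinity matching where the device will actually deliver.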
@@ -282,12 +321,29 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 
 	/* Work our way from MSb to LSb...same order we alloc EIRs */
 	for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+		cpumask_t dest = irq_affinity[irq];
+
 		if (!(bit & eirr_val))
 			continue;
 
 		/* clear bit in mask - can exit loop sooner */
 		eirr_val &= ~bit;
 
+		/* FIXME: because generic set affinity mucks
+		 * with the affinity before sending it to us
+		 * we can get the situation where the affinity is
+		 * wrong for our CPU type interrupts */
+		if (irq != TIMER_IRQ && irq != IPI_IRQ &&
+		    !cpu_isset(smp_processor_id(), dest)) {
+			int cpu = first_cpu(dest);
+
+			printk("rethrowing irq %d from %d to %d\n",
+			       irq, smp_processor_id(), cpu);
+			gsc_writel(irq + CPU_IRQ_BASE,
+				   cpu_data[cpu].hpa);
+			continue;
+		}
+
 		__do_IRQ(irq, regs);
 	}
 }
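This rethrow path is the safety net for the FIXME above: if an EIR bit lands on a CPU that is no longer in the interrupt's mask, the handler forwards it by writing the interrupt number into the target CPU's EIR space (the gsc_writel() to cpu_data[cpu].hpa), which retriggers the interrupt there. Reusing the definitions from the earlier sketch, the decision reduces to the following illustrative model (forward_to_cpu() is a stand-in for the MMIO write, not a kernel function):

/* Model of the rethrow decision; reuses irq_affinity[], first_cpu(),
 * TIMER_IRQ and IPI_IRQ from the sketch after the second hunk. */
static void forward_to_cpu(unsigned int irq, int cpu)
{
	/* in the kernel: gsc_writel(irq + CPU_IRQ_BASE, cpu_data[cpu].hpa) */
	printf("rethrowing irq %u to cpu %d\n", irq, cpu);
}

static void handle_one(unsigned int irq, int this_cpu)
{
	unsigned long dest = irq_affinity[irq];

	/* timer and IPI are taken wherever they arrive */
	if (irq != TIMER_IRQ && irq != IPI_IRQ &&
	    !(dest & (1UL << this_cpu))) {
		forward_to_cpu(irq, first_cpu(dest));
		return;	/* the other CPU will run the handler */
	}
	/* otherwise this is the right CPU: __do_IRQ(irq, regs) in the kernel */
}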