@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0;
 */
 static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
 
-static void cpu_mask_irq(unsigned int irq)
+static void cpu_mask_irq(struct irq_data *d)
 {
-        unsigned long eirr_bit = EIEM_MASK(irq);
+        unsigned long eirr_bit = EIEM_MASK(d->irq);
 
         cpu_eiem &= ~eirr_bit;
         /* Do nothing on the other CPUs.  If they get this interrupt,
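
Every callback converted in this patch follows the same pattern: the raw
IRQ number argument becomes the genirq core's struct irq_data, and the
number is recovered as d->irq.  For reference, a sketch of the fields the
patch actually touches, abridged from the <linux/irq.h> of this kernel
generation (not part of the patch):

        struct irq_data {
                unsigned int    irq;            /* interrupt number */
                struct irq_chip *chip;          /* low-level chip ops */
                void            *chip_data;     /* chip-private cookie */
                cpumask_var_t   affinity;       /* IRQ affinity (SMP only) */
                /* ... remaining fields omitted ... */
        };
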
@@ -63,7 +63,7 @@ static void cpu_mask_irq(unsigned int irq)
          * then gets disabled */
 }
 
-static void cpu_unmask_irq(unsigned int irq)
+static void __cpu_unmask_irq(unsigned int irq)
 {
         unsigned long eirr_bit = EIEM_MASK(irq);
 
@@ -75,9 +75,14 @@ static void cpu_unmask_irq(unsigned int irq)
         smp_send_all_nop();
 }
 
-void cpu_ack_irq(unsigned int irq)
+static void cpu_unmask_irq(struct irq_data *d)
+{
+        __cpu_unmask_irq(d->irq);
+}
+
+void cpu_ack_irq(struct irq_data *d)
 {
-        unsigned long mask = EIEM_MASK(irq);
+        unsigned long mask = EIEM_MASK(d->irq);
         int cpu = smp_processor_id();
 
         /* Clear in EIEM so we can no longer process */
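
Note the split above: cpu_unmask_irq() survives only as a thin
struct irq_data wrapper, while the body moves to __cpu_unmask_irq(),
which keeps taking a raw number because cpu_claim_irq() (hunk at old
line 233 below) must still unmask by number after handing the line to
another chip.  The general shape, with hypothetical names:

        static void __foo_unmask(unsigned int irq);     /* does the work */

        static void foo_unmask(struct irq_data *d)      /* irq_chip callback */
        {
                __foo_unmask(d->irq);
        }
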
@@ -90,9 +95,9 @@ void cpu_ack_irq(unsigned int irq)
         mtctl(mask, 23);
 }
 
-void cpu_eoi_irq(unsigned int irq)
+void cpu_eoi_irq(struct irq_data *d)
 {
-        unsigned long mask = EIEM_MASK(irq);
+        unsigned long mask = EIEM_MASK(d->irq);
         int cpu = smp_processor_id();
 
         /* set it in the eiems---it's no longer in process */
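
cpu_ack_irq() and cpu_eoi_irq() stay non-static because the parisc
iosapic chip reuses them directly as its ack/eoi callbacks.  Between ack
and eoi, the per-CPU local_ack_eiem drops the in-service bit, so both
unchanged function bodies recompute the effective enable mask along
these lines (a sketch from the surrounding file, not from this hunk):

        /* global enable mask, minus the bit currently being serviced */
        set_eiem(cpu_eiem & per_cpu(local_ack_eiem, smp_processor_id()));
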
@@ -103,15 +108,16 @@ void cpu_eoi_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
 {
         int cpu_dest;
 
         /* timer and ipi have to always be received on all CPUs */
-        if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) {
+        if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) {
                 /* Bad linux design decision.  The mask has already
-                 * been set; we must reset it */
-                cpumask_setall(irq_desc[irq].affinity);
+                 * been set; we must reset it. Will fix - tglx
+                 */
+                cpumask_setall(d->affinity);
                 return -EINVAL;
         }
 
@@ -121,33 +127,34 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
         return cpu_dest;
 }
 
-static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+                                bool force)
 {
         int cpu_dest;
 
-        cpu_dest = cpu_check_affinity(irq, dest);
+        cpu_dest = cpu_check_affinity(d, dest);
         if (cpu_dest < 0)
                 return -1;
 
-        cpumask_copy(irq_desc[irq].affinity, dest);
+        cpumask_copy(d->affinity, dest);
 
         return 0;
 }
 #endif
 
 static struct irq_chip cpu_interrupt_type = {
-        .name           = "CPU",
-        .mask           = cpu_mask_irq,
-        .unmask         = cpu_unmask_irq,
-        .ack            = cpu_ack_irq,
-        .eoi            = cpu_eoi_irq,
+        .name           = "CPU",
+        .irq_mask       = cpu_mask_irq,
+        .irq_unmask     = cpu_unmask_irq,
+        .irq_ack        = cpu_ack_irq,
+        .irq_eoi        = cpu_eoi_irq,
 #ifdef CONFIG_SMP
-        .set_affinity   = cpu_set_affinity_irq,
+        .irq_set_affinity = cpu_set_affinity_irq,
 #endif
         /* XXX: Needs to be written.  We managed without it so far, but
          * we really ought to write it.
          */
-        .retrigger      = NULL,
+        .irq_retrigger  = NULL,
 };
 
 int show_interrupts(struct seq_file *p, void *v)
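
The member renames in this hunk track the irq_chip callbacks of this
kernel generation; the extra bool force parameter gained by
cpu_set_affinity_irq() matches the new .irq_set_affinity signature.
Abridged sketch of the targeted structure (not part of the patch):

        struct irq_chip {
                const char      *name;
                void            (*irq_mask)(struct irq_data *data);
                void            (*irq_unmask)(struct irq_data *data);
                void            (*irq_ack)(struct irq_data *data);
                void            (*irq_eoi)(struct irq_data *data);
                int             (*irq_set_affinity)(struct irq_data *data,
                                                    const struct cpumask *dest,
                                                    bool force);
                int             (*irq_retrigger)(struct irq_data *data);
                /* ... remaining callbacks omitted ... */
        };
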
@@ -181,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
                 seq_printf(p, "%10u ", kstat_irqs(i));
 #endif
 
-                seq_printf(p, " %14s", irq_desc[i].chip->name);
+                seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
                 seq_printf(p, " %s", action->name);
 
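
show_interrupts() now reaches the chip through the irq_data embedded in
irq_desc.  An equivalent form using the accessor this patch itself uses
in cpu_claim_irq() below would be (a sketch, not what the patch chose):

        seq_printf(p, " %14s", get_irq_chip(i)->name);
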
@@ -233,14 +240,14 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
 {
         if (irq_desc[irq].action)
                 return -EBUSY;
-        if (irq_desc[irq].chip != &cpu_interrupt_type)
+        if (get_irq_chip(irq) != &cpu_interrupt_type)
                 return -EBUSY;
 
         /* for iosapic interrupts */
         if (type) {
                 set_irq_chip_and_handler(irq, type, handle_percpu_irq);
                 set_irq_chip_data(irq, data);
-                cpu_unmask_irq(irq);
+                __cpu_unmask_irq(irq);
         }
         return 0;
 }
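
cpu_claim_irq() checks the current chip through get_irq_chip() instead
of poking irq_desc[] directly, and calls the raw-number
__cpu_unmask_irq(), since by that point the line already belongs to the
caller's chip.  A hypothetical caller, for illustration only
(driver_chip and cookie are made-up names, not from this patch):

        int ret = cpu_claim_irq(irq, &driver_chip, cookie);
        if (ret)
                return ret;     /* -EBUSY if the line is already in use */
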
@@ -289,7 +296,8 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-        cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+        struct irq_data *d = irq_get_irq_data(irq);
+        cpumask_copy(d->affinity, cpumask_of(cpu));
 #endif
 
         return per_cpu(cpu_data, cpu).txn_addr;
@@ -333,6 +341,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
         unsigned long eirr_val;
         int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
+        struct irq_desc *desc;
         cpumask_t dest;
 #endif
 
@@ -346,8 +355,9 @@ void do_cpu_irq_mask(struct pt_regs *regs)
         irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-        cpumask_copy(&dest, irq_desc[irq].affinity);
-        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+        desc = irq_to_desc(irq);
+        cpumask_copy(&dest, desc->irq_data.affinity);
+        if (CHECK_IRQ_PER_CPU(desc->status) &&
             !cpu_isset(smp_processor_id(), dest)) {
                 int cpu = first_cpu(dest);
 