@@ -46,6 +46,12 @@
 
 #define IRQ_DEBUG	0
 
+#define IRQ_VECTOR_UNASSIGNED	(0)
+
+#define IRQ_UNUSED		(0)
+#define IRQ_USED		(1)
+#define IRQ_RSVD		(2)
+
 /* These can be overridden in platform_irq_init */
 int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
 int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -64,46 +70,161 @@ __u8 isa_irq_to_vector_map[16] = {
 };
 EXPORT_SYMBOL(isa_irq_to_vector_map);
 
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
+DEFINE_SPINLOCK(vector_lock);
+
+struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
+	[0 ... NR_IRQS - 1] = { .vector = IRQ_VECTOR_UNASSIGNED }
+};
+
+DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
+	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
+};
+
+static int irq_status[NR_IRQS] = {
+	[0 ... NR_IRQS -1] = IRQ_UNUSED
+};
+
+int check_irq_used(int irq)
+{
+	if (irq_status[irq] == IRQ_USED)
+		return 1;
+
+	return -1;
+}
+
+static void reserve_irq(unsigned int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	irq_status[irq] = IRQ_RSVD;
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+static inline int find_unassigned_irq(void)
+{
+	int irq;
+
+	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
+		if (irq_status[irq] == IRQ_UNUSED)
+			return irq;
+	return -ENOSPC;
+}
+
+static inline int find_unassigned_vector(void)
+{
+	int vector;
+
+	for (vector = IA64_FIRST_DEVICE_VECTOR;
+	     vector <= IA64_LAST_DEVICE_VECTOR; vector++)
+		if (__get_cpu_var(vector_irq[vector]) == IA64_SPURIOUS_INT_VECTOR)
+			return vector;
+	return -ENOSPC;
+}
+
+static int __bind_irq_vector(int irq, int vector)
+{
+	int cpu;
+
+	if (irq_to_vector(irq) == vector)
+		return 0;
+	if (irq_to_vector(irq) != IRQ_VECTOR_UNASSIGNED)
+		return -EBUSY;
+	for_each_online_cpu(cpu)
+		per_cpu(vector_irq, cpu)[vector] = irq;
+	irq_cfg[irq].vector = vector;
+	irq_status[irq] = IRQ_USED;
+	return 0;
+}
+
+int bind_irq_vector(int irq, int vector)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __bind_irq_vector(irq, vector);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+static void clear_irq_vector(int irq)
+{
+	unsigned long flags;
+	int vector, cpu;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	BUG_ON((unsigned)irq >= NR_IRQS);
+	BUG_ON(irq_cfg[irq].vector == IRQ_VECTOR_UNASSIGNED);
+	vector = irq_cfg[irq].vector;
+	for_each_online_cpu(cpu)
+		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+	irq_cfg[irq].vector = IRQ_VECTOR_UNASSIGNED;
+	irq_status[irq] = IRQ_UNUSED;
+	spin_unlock_irqrestore(&vector_lock, flags);
+}
 
 int
 assign_irq_vector (int irq)
 {
-	int pos, vector;
- again:
-	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
-	vector = IA64_FIRST_DEVICE_VECTOR + pos;
-	if (vector > IA64_LAST_DEVICE_VECTOR)
-		return -ENOSPC;
-	if (test_and_set_bit(pos, ia64_vector_mask))
-		goto again;
+	unsigned long flags;
+	int vector = -ENOSPC;
+
+	if (irq < 0) {
+		goto out;
+	}
+	spin_lock_irqsave(&vector_lock, flags);
+	vector = find_unassigned_vector();
+	if (vector < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector));
+	spin_unlock_irqrestore(&vector_lock, flags);
+ out:
 	return vector;
 }
 
 void
 free_irq_vector (int vector)
 {
-	int pos;
-
-	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+	if (vector < IA64_FIRST_DEVICE_VECTOR ||
+	    vector > IA64_LAST_DEVICE_VECTOR)
 		return;
-
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	if (!test_and_clear_bit(pos, ia64_vector_mask))
-		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+	clear_irq_vector(vector);
 }
 
 int
 reserve_irq_vector (int vector)
 {
-	int pos;
-
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
+	return !!bind_irq_vector(vector, vector);
+}
 
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	return test_and_set_bit(pos, ia64_vector_mask);
+/*
+ * Initialize vector_irq on a new cpu. This function must be called
+ * with vector_lock held.
+ */
+void __setup_vector_irq(int cpu)
+{
+	int irq, vector;
+
+	/* Clear vector_irq */
+	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
+		per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
+	/* Mark the inuse vectors */
+	for (irq = 0; irq < NR_IRQS; ++irq) {
+		if ((vector = irq_to_vector(irq)) != IRQ_VECTOR_UNASSIGNED)
+			per_cpu(vector_irq, cpu)[vector] = irq;
+	}
+}
+
+void destroy_and_reserve_irq(unsigned int irq)
+{
+	dynamic_irq_cleanup(irq);
+
+	clear_irq_vector(irq);
+	reserve_irq(irq);
 }
 
 /*
@@ -111,18 +232,29 @@ reserve_irq_vector (int vector)
  */
 int create_irq(void)
 {
-	int vector = assign_irq_vector(AUTO_ASSIGN);
-
-	if (vector >= 0)
-		dynamic_irq_init(vector);
-
-	return vector;
+	unsigned long flags;
+	int irq, vector;
+
+	irq = -ENOSPC;
+	spin_lock_irqsave(&vector_lock, flags);
+	vector = find_unassigned_vector();
+	if (vector < 0)
+		goto out;
+	irq = find_unassigned_irq();
+	if (irq < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
+	if (irq >= 0)
+		dynamic_irq_init(irq);
+	return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
 	dynamic_irq_cleanup(irq);
-	free_irq_vector(irq);
+	clear_irq_vector(irq);
 }
 
 #ifdef CONFIG_SMP
@@ -301,14 +433,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	irq_desc_t *desc;
 	unsigned int irq;
 
-	for (irq = 0; irq < NR_IRQS; ++irq)
-		if (irq_to_vector(irq) == vec) {
-			desc = irq_desc + irq;
-			desc->status |= IRQ_PER_CPU;
-			desc->chip = &irq_type_ia64_lsapic;
-			if (action)
-				setup_irq(irq, action);
-		}
+	irq = vec;
+	BUG_ON(bind_irq_vector(irq, vec));
+	desc = irq_desc + irq;
+	desc->status |= IRQ_PER_CPU;
+	desc->chip = &irq_type_ia64_lsapic;
+	if (action)
+		setup_irq(irq, action);
 }
 
 void __init