|
@@ -441,6 +441,7 @@ static LIST_HEAD(irq_hosts);
|
|
|
static DEFINE_SPINLOCK(irq_big_lock);
|
|
|
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
|
|
|
static unsigned int irq_radix_writer;
|
|
|
+static unsigned int revmap_trees_allocated;
|
|
|
struct irq_map_entry irq_map[NR_IRQS];
|
|
|
static unsigned int irq_virq_count = NR_IRQS;
|
|
|
static struct irq_host *irq_default_host;
|
|
@@ -821,8 +822,12 @@ void irq_dispose_mapping(unsigned int virq)
|
|
|
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
|
|
|
break;
|
|
|
case IRQ_HOST_MAP_TREE:
|
|
|
- /* Check if radix tree allocated yet */
|
|
|
- if (host->revmap_data.tree.gfp_mask == 0)
|
|
|
+ /*
|
|
|
+ * Check if radix tree allocated yet, if not then nothing to
|
|
|
+ * remove.
|
|
|
+ */
|
|
|
+ smp_rmb();
|
|
|
+ if (revmap_trees_allocated < 1)
|
|
|
break;
|
|
|
irq_radix_wrlock(&flags);
|
|
|
radix_tree_delete(&host->revmap_data.tree, hwirq);
|
|
@@ -875,43 +880,62 @@ unsigned int irq_find_mapping(struct irq_host *host,
|
|
|
EXPORT_SYMBOL_GPL(irq_find_mapping);
|
|
|
|
|
|
|
|
|
-unsigned int irq_radix_revmap(struct irq_host *host,
|
|
|
- irq_hw_number_t hwirq)
|
|
|
+unsigned int irq_radix_revmap_lookup(struct irq_host *host,
|
|
|
+ irq_hw_number_t hwirq)
|
|
|
{
|
|
|
- struct radix_tree_root *tree;
|
|
|
struct irq_map_entry *ptr;
|
|
|
unsigned int virq;
|
|
|
unsigned long flags;
|
|
|
|
|
|
WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
|
|
|
|
|
|
- /* Check if the radix tree exist yet. We test the value of
|
|
|
- * the gfp_mask for that. Sneaky but saves another int in the
|
|
|
- * structure. If not, we fallback to slow mode
|
|
|
+ /*
|
|
|
+	 * Check if the radix tree exists and has been initialized.
|
|
|
+ * If not, we fallback to slow mode
|
|
|
*/
|
|
|
- tree = &host->revmap_data.tree;
|
|
|
- if (tree->gfp_mask == 0)
|
|
|
+ if (revmap_trees_allocated < 2)
|
|
|
return irq_find_mapping(host, hwirq);
|
|
|
|
|
|
/* Now try to resolve */
|
|
|
irq_radix_rdlock(&flags);
|
|
|
- ptr = radix_tree_lookup(tree, hwirq);
|
|
|
+ ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
|
|
|
irq_radix_rdunlock(flags);
|
|
|
|
|
|
- /* Found it, return */
|
|
|
- if (ptr) {
|
|
|
+ /*
|
|
|
+ * If found in radix tree, then fine.
|
|
|
+ * Else fallback to linear lookup - this should not happen in practice
|
|
|
+ * as it means that we failed to insert the node in the radix tree.
|
|
|
+ */
|
|
|
+ if (ptr)
|
|
|
virq = ptr - irq_map;
|
|
|
- return virq;
|
|
|
- }
|
|
|
+ else
|
|
|
+ virq = irq_find_mapping(host, hwirq);
|
|
|
+
|
|
|
+ return virq;
|
|
|
+}
|
|
|
+
|
|
|
+void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
|
|
|
+ irq_hw_number_t hwirq)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Check if the radix tree exists yet.
|
|
|
+ * If not, then the irq will be inserted into the tree when it gets
|
|
|
+ * initialized.
|
|
|
+ */
|
|
|
+ smp_rmb();
|
|
|
+ if (revmap_trees_allocated < 1)
|
|
|
+ return;
|
|
|
|
|
|
- /* If not there, try to insert it */
|
|
|
- virq = irq_find_mapping(host, hwirq);
|
|
|
if (virq != NO_IRQ) {
|
|
|
irq_radix_wrlock(&flags);
|
|
|
- radix_tree_insert(tree, hwirq, &irq_map[virq]);
|
|
|
+ radix_tree_insert(&host->revmap_data.tree, hwirq,
|
|
|
+ &irq_map[virq]);
|
|
|
irq_radix_wrunlock(flags);
|
|
|
}
|
|
|
- return virq;
|
|
|
}
|
|
|
|
|
|
unsigned int irq_linear_revmap(struct irq_host *host,
|
|
@@ -1021,14 +1045,45 @@ static int irq_late_init(void)
|
|
|
{
|
|
|
struct irq_host *h;
|
|
|
unsigned long flags;
|
|
|
+ unsigned int i;
|
|
|
|
|
|
- irq_radix_wrlock(&flags);
|
|
|
+ /*
|
|
|
+ * No mutual exclusion with respect to accessors of the tree is needed
|
|
|
+ * here as the synchronization is done via the state variable
|
|
|
+ * revmap_trees_allocated.
|
|
|
+ */
|
|
|
list_for_each_entry(h, &irq_hosts, link) {
|
|
|
if (h->revmap_type == IRQ_HOST_MAP_TREE)
|
|
|
- INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
|
|
|
+ INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+	 * Make sure the radix tree initializations are visible before setting
|
|
|
+ * the flag
|
|
|
+ */
|
|
|
+ smp_wmb();
|
|
|
+ revmap_trees_allocated = 1;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Insert the reverse mapping for those interrupts already present
|
|
|
+ * in irq_map[].
|
|
|
+ */
|
|
|
+ irq_radix_wrlock(&flags);
|
|
|
+ for (i = 0; i < irq_virq_count; i++) {
|
|
|
+ if (irq_map[i].host &&
|
|
|
+ (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
|
|
|
+ radix_tree_insert(&irq_map[i].host->revmap_data.tree,
|
|
|
+ irq_map[i].hwirq, &irq_map[i]);
|
|
|
}
|
|
|
irq_radix_wrunlock(flags);
|
|
|
|
|
|
+ /*
|
|
|
+	 * Make sure the radix tree insertions are visible before setting
|
|
|
+ * the flag
|
|
|
+ */
|
|
|
+ smp_wmb();
|
|
|
+ revmap_trees_allocated = 2;
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
arch_initcall(irq_late_init);
|