|
@@ -23,9 +23,11 @@ static DEFINE_MUTEX(revmap_trees_mutex);
|
|
|
static struct irq_domain *irq_default_domain;
|
|
|
|
|
|
/**
|
|
|
- * irq_domain_alloc() - Allocate a new irq_domain data structure
|
|
|
+ * __irq_domain_add() - Allocate a new irq_domain data structure
|
|
|
* @of_node: optional device-tree node of the interrupt controller
|
|
|
- * @revmap_type: type of reverse mapping to use
|
|
|
+ * @size: Size of linear map; 0 for radix mapping only
|
|
|
+ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
|
|
|
+ * direct mapping
|
|
|
* @ops: map/unmap domain callbacks
|
|
|
* @host_data: Controller private data pointer
|
|
|
*
|
|
@@ -33,41 +35,35 @@ static struct irq_domain *irq_default_domain;
|
|
|
* register allocated irq_domain with irq_domain_register(). Returns pointer
|
|
|
* to IRQ domain, or NULL on failure.
|
|
|
*/
|
|
|
-static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
|
|
|
- unsigned int revmap_type,
|
|
|
- const struct irq_domain_ops *ops,
|
|
|
- void *host_data)
|
|
|
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
|
|
|
+ irq_hw_number_t hwirq_max, int direct_max,
|
|
|
+ const struct irq_domain_ops *ops,
|
|
|
+ void *host_data)
|
|
|
{
|
|
|
struct irq_domain *domain;
|
|
|
|
|
|
- domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
|
|
|
- of_node_to_nid(of_node));
|
|
|
+ domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
|
|
|
+ GFP_KERNEL, of_node_to_nid(of_node));
|
|
|
if (WARN_ON(!domain))
|
|
|
return NULL;
|
|
|
|
|
|
/* Fill structure */
|
|
|
- domain->revmap_type = revmap_type;
|
|
|
+ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
|
|
|
domain->ops = ops;
|
|
|
domain->host_data = host_data;
|
|
|
domain->of_node = of_node_get(of_node);
|
|
|
+ domain->hwirq_max = hwirq_max;
|
|
|
+ domain->revmap_size = size;
|
|
|
+ domain->revmap_direct_max_irq = direct_max;
|
|
|
|
|
|
- return domain;
|
|
|
-}
|
|
|
-
|
|
|
-static void irq_domain_free(struct irq_domain *domain)
|
|
|
-{
|
|
|
- of_node_put(domain->of_node);
|
|
|
- kfree(domain);
|
|
|
-}
|
|
|
-
|
|
|
-static void irq_domain_add(struct irq_domain *domain)
|
|
|
-{
|
|
|
mutex_lock(&irq_domain_mutex);
|
|
|
list_add(&domain->link, &irq_domain_list);
|
|
|
mutex_unlock(&irq_domain_mutex);
|
|
|
- pr_debug("Allocated domain of type %d @0x%p\n",
|
|
|
- domain->revmap_type, domain);
|
|
|
+
|
|
|
+ pr_debug("Added domain %s\n", domain->name);
|
|
|
+ return domain;
|
|
|
}
|
|
|
+EXPORT_SYMBOL_GPL(__irq_domain_add);
|
|
|
|
|
|
/**
|
|
|
* irq_domain_remove() - Remove an irq domain.
|
|
@@ -81,29 +77,12 @@ void irq_domain_remove(struct irq_domain *domain)
|
|
|
{
|
|
|
mutex_lock(&irq_domain_mutex);
|
|
|
|
|
|
- switch (domain->revmap_type) {
|
|
|
- case IRQ_DOMAIN_MAP_LEGACY:
|
|
|
- /*
|
|
|
- * Legacy domains don't manage their own irq_desc
|
|
|
- * allocations, we expect the caller to handle irq_desc
|
|
|
- * freeing on their own.
|
|
|
- */
|
|
|
- break;
|
|
|
- case IRQ_DOMAIN_MAP_TREE:
|
|
|
- /*
|
|
|
- * radix_tree_delete() takes care of destroying the root
|
|
|
- * node when all entries are removed. Shout if there are
|
|
|
- * any mappings left.
|
|
|
- */
|
|
|
- WARN_ON(domain->revmap_data.tree.height);
|
|
|
- break;
|
|
|
- case IRQ_DOMAIN_MAP_LINEAR:
|
|
|
- kfree(domain->revmap_data.linear.revmap);
|
|
|
- domain->revmap_data.linear.size = 0;
|
|
|
- break;
|
|
|
- case IRQ_DOMAIN_MAP_NOMAP:
|
|
|
- break;
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * radix_tree_delete() takes care of destroying the root
|
|
|
+ * node when all entries are removed. Shout if there are
|
|
|
+ * any mappings left.
|
|
|
+ */
|
|
|
+ WARN_ON(domain->revmap_tree.height);
|
|
|
|
|
|
list_del(&domain->link);
|
|
|
|
|
@@ -115,44 +94,30 @@ void irq_domain_remove(struct irq_domain *domain)
|
|
|
|
|
|
mutex_unlock(&irq_domain_mutex);
|
|
|
|
|
|
- pr_debug("Removed domain of type %d @0x%p\n",
|
|
|
- domain->revmap_type, domain);
|
|
|
+ pr_debug("Removed domain %s\n", domain->name);
|
|
|
|
|
|
- irq_domain_free(domain);
|
|
|
+ of_node_put(domain->of_node);
|
|
|
+ kfree(domain);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_domain_remove);
|
|
|
|
|
|
-static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
|
|
|
- irq_hw_number_t hwirq)
|
|
|
-{
|
|
|
- irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
|
|
|
- int size = domain->revmap_data.legacy.size;
|
|
|
-
|
|
|
- if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
|
|
|
- return 0;
|
|
|
- return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
- * irq_domain_add_simple() - Allocate and register a simple irq_domain.
|
|
|
+ * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
|
|
|
* @of_node: pointer to interrupt controller's device tree node.
|
|
|
* @size: total number of irqs in mapping
|
|
|
* @first_irq: first number of irq block assigned to the domain,
|
|
|
- * pass zero to assign irqs on-the-fly. This will result in a
|
|
|
- * linear IRQ domain so it is important to use irq_create_mapping()
|
|
|
- * for each used IRQ, especially when SPARSE_IRQ is enabled.
|
|
|
+ * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
|
|
|
+ * pre-map all of the irqs in the domain to virqs starting at first_irq.
|
|
|
* @ops: map/unmap domain callbacks
|
|
|
* @host_data: Controller private data pointer
|
|
|
*
|
|
|
- * Allocates a legacy irq_domain if irq_base is positive or a linear
|
|
|
- * domain otherwise. For the legacy domain, IRQ descriptors will also
|
|
|
- * be allocated.
|
|
|
+ * Allocates an irq_domain, and optionally if first_irq is positive then also
|
|
|
+ * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
|
|
|
*
|
|
|
* This is intended to implement the expected behaviour for most
|
|
|
- * interrupt controllers which is that a linear mapping should
|
|
|
- * normally be used unless the system requires a legacy mapping in
|
|
|
- * order to support supplying interrupt numbers during non-DT
|
|
|
- * registration of devices.
|
|
|
+ * interrupt controllers. If device tree is used, then first_irq will be 0 and
|
|
|
+ * irqs get mapped dynamically on the fly. However, if the controller requires
|
|
|
+ * static virq assignments (non-DT boot) then it will set that up correctly.
|
|
|
*/
|
|
|
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
|
|
|
unsigned int size,
|
|
@@ -160,33 +125,25 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
|
|
|
const struct irq_domain_ops *ops,
|
|
|
void *host_data)
|
|
|
{
|
|
|
- if (first_irq > 0) {
|
|
|
- int irq_base;
|
|
|
+ struct irq_domain *domain;
|
|
|
+
|
|
|
+ domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
|
|
|
+ if (!domain)
|
|
|
+ return NULL;
|
|
|
|
|
|
+ if (first_irq > 0) {
|
|
|
if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
|
|
|
- /*
|
|
|
- * Set the descriptor allocator to search for a
|
|
|
- * 1-to-1 mapping, such as irq_alloc_desc_at().
|
|
|
- * Use of_node_to_nid() which is defined to
|
|
|
- * numa_node_id() on platforms that have no custom
|
|
|
- * implementation.
|
|
|
- */
|
|
|
- irq_base = irq_alloc_descs(first_irq, first_irq, size,
|
|
|
- of_node_to_nid(of_node));
|
|
|
- if (irq_base < 0) {
|
|
|
+ /* attempt to allocate irq_descs */
|
|
|
+ int rc = irq_alloc_descs(first_irq, first_irq, size,
|
|
|
+ of_node_to_nid(of_node));
|
|
|
+ if (rc < 0)
|
|
|
pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
|
|
|
first_irq);
|
|
|
- irq_base = first_irq;
|
|
|
- }
|
|
|
- } else
|
|
|
- irq_base = first_irq;
|
|
|
-
|
|
|
- return irq_domain_add_legacy(of_node, size, irq_base, 0,
|
|
|
- ops, host_data);
|
|
|
+ }
|
|
|
+ irq_domain_associate_many(domain, first_irq, 0, size);
|
|
|
}
|
|
|
|
|
|
- /* A linear domain is the default */
|
|
|
- return irq_domain_add_linear(of_node, size, ops, host_data);
|
|
|
+ return domain;
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
|
|
|
|
|
@@ -213,130 +170,18 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
|
|
|
void *host_data)
|
|
|
{
|
|
|
struct irq_domain *domain;
|
|
|
- unsigned int i;
|
|
|
|
|
|
- domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
|
|
|
+ domain = __irq_domain_add(of_node, first_hwirq + size,
|
|
|
+ first_hwirq + size, 0, ops, host_data);
|
|
|
if (!domain)
|
|
|
return NULL;
|
|
|
|
|
|
- domain->revmap_data.legacy.first_irq = first_irq;
|
|
|
- domain->revmap_data.legacy.first_hwirq = first_hwirq;
|
|
|
- domain->revmap_data.legacy.size = size;
|
|
|
+ irq_domain_associate_many(domain, first_irq, first_hwirq, size);
|
|
|
|
|
|
- mutex_lock(&irq_domain_mutex);
|
|
|
- /* Verify that all the irqs are available */
|
|
|
- for (i = 0; i < size; i++) {
|
|
|
- int irq = first_irq + i;
|
|
|
- struct irq_data *irq_data = irq_get_irq_data(irq);
|
|
|
-
|
|
|
- if (WARN_ON(!irq_data || irq_data->domain)) {
|
|
|
- mutex_unlock(&irq_domain_mutex);
|
|
|
- irq_domain_free(domain);
|
|
|
- return NULL;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- /* Claim all of the irqs before registering a legacy domain */
|
|
|
- for (i = 0; i < size; i++) {
|
|
|
- struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
|
|
|
- irq_data->hwirq = first_hwirq + i;
|
|
|
- irq_data->domain = domain;
|
|
|
- }
|
|
|
- mutex_unlock(&irq_domain_mutex);
|
|
|
-
|
|
|
- for (i = 0; i < size; i++) {
|
|
|
- int irq = first_irq + i;
|
|
|
- int hwirq = first_hwirq + i;
|
|
|
-
|
|
|
- /* IRQ0 gets ignored */
|
|
|
- if (!irq)
|
|
|
- continue;
|
|
|
-
|
|
|
- /* Legacy flags are left to default at this point,
|
|
|
- * one can then use irq_create_mapping() to
|
|
|
- * explicitly change them
|
|
|
- */
|
|
|
- if (ops->map)
|
|
|
- ops->map(domain, irq, hwirq);
|
|
|
-
|
|
|
- /* Clear norequest flags */
|
|
|
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
|
|
|
- }
|
|
|
-
|
|
|
- irq_domain_add(domain);
|
|
|
return domain;
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
|
|
|
|
|
|
-/**
|
|
|
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
|
|
|
- * @of_node: pointer to interrupt controller's device tree node.
|
|
|
- * @size: Number of interrupts in the domain.
|
|
|
- * @ops: map/unmap domain callbacks
|
|
|
- * @host_data: Controller private data pointer
|
|
|
- */
|
|
|
-struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
|
|
|
- unsigned int size,
|
|
|
- const struct irq_domain_ops *ops,
|
|
|
- void *host_data)
|
|
|
-{
|
|
|
- struct irq_domain *domain;
|
|
|
- unsigned int *revmap;
|
|
|
-
|
|
|
- revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
|
|
|
- of_node_to_nid(of_node));
|
|
|
- if (WARN_ON(!revmap))
|
|
|
- return NULL;
|
|
|
-
|
|
|
- domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
|
|
|
- if (!domain) {
|
|
|
- kfree(revmap);
|
|
|
- return NULL;
|
|
|
- }
|
|
|
- domain->revmap_data.linear.size = size;
|
|
|
- domain->revmap_data.linear.revmap = revmap;
|
|
|
- irq_domain_add(domain);
|
|
|
- return domain;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(irq_domain_add_linear);
|
|
|
-
|
|
|
-struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
|
|
|
- unsigned int max_irq,
|
|
|
- const struct irq_domain_ops *ops,
|
|
|
- void *host_data)
|
|
|
-{
|
|
|
- struct irq_domain *domain = irq_domain_alloc(of_node,
|
|
|
- IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
|
|
|
- if (domain) {
|
|
|
- domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
|
|
|
- irq_domain_add(domain);
|
|
|
- }
|
|
|
- return domain;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
|
|
|
-
|
|
|
-/**
|
|
|
- * irq_domain_add_tree()
|
|
|
- * @of_node: pointer to interrupt controller's device tree node.
|
|
|
- * @ops: map/unmap domain callbacks
|
|
|
- *
|
|
|
- * Note: The radix tree will be allocated later during boot automatically
|
|
|
- * (the reverse mapping will use the slow path until that happens).
|
|
|
- */
|
|
|
-struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
|
|
|
- const struct irq_domain_ops *ops,
|
|
|
- void *host_data)
|
|
|
-{
|
|
|
- struct irq_domain *domain = irq_domain_alloc(of_node,
|
|
|
- IRQ_DOMAIN_MAP_TREE, ops, host_data);
|
|
|
- if (domain) {
|
|
|
- INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
|
|
|
- irq_domain_add(domain);
|
|
|
- }
|
|
|
- return domain;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(irq_domain_add_tree);
|
|
|
-
|
|
|
/**
|
|
|
* irq_find_host() - Locates a domain for a given device node
|
|
|
* @node: device-tree node of the interrupt controller
|
|
@@ -385,125 +230,108 @@ void irq_set_default_host(struct irq_domain *domain)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_set_default_host);
|
|
|
|
|
|
-static void irq_domain_disassociate_many(struct irq_domain *domain,
|
|
|
- unsigned int irq_base, int count)
|
|
|
+static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
|
|
|
{
|
|
|
- /*
|
|
|
- * disassociate in reverse order;
|
|
|
- * not strictly necessary, but nice for unwinding
|
|
|
- */
|
|
|
- while (count--) {
|
|
|
- int irq = irq_base + count;
|
|
|
- struct irq_data *irq_data = irq_get_irq_data(irq);
|
|
|
- irq_hw_number_t hwirq;
|
|
|
+ struct irq_data *irq_data = irq_get_irq_data(irq);
|
|
|
+ irq_hw_number_t hwirq;
|
|
|
|
|
|
- if (WARN_ON(!irq_data || irq_data->domain != domain))
|
|
|
- continue;
|
|
|
+ if (WARN(!irq_data || irq_data->domain != domain,
|
|
|
+ "virq%i doesn't exist; cannot disassociate\n", irq))
|
|
|
+ return;
|
|
|
|
|
|
- hwirq = irq_data->hwirq;
|
|
|
- irq_set_status_flags(irq, IRQ_NOREQUEST);
|
|
|
+ hwirq = irq_data->hwirq;
|
|
|
+ irq_set_status_flags(irq, IRQ_NOREQUEST);
|
|
|
|
|
|
- /* remove chip and handler */
|
|
|
- irq_set_chip_and_handler(irq, NULL, NULL);
|
|
|
+ /* remove chip and handler */
|
|
|
+ irq_set_chip_and_handler(irq, NULL, NULL);
|
|
|
|
|
|
- /* Make sure it's completed */
|
|
|
- synchronize_irq(irq);
|
|
|
+ /* Make sure it's completed */
|
|
|
+ synchronize_irq(irq);
|
|
|
|
|
|
- /* Tell the PIC about it */
|
|
|
- if (domain->ops->unmap)
|
|
|
- domain->ops->unmap(domain, irq);
|
|
|
- smp_mb();
|
|
|
+ /* Tell the PIC about it */
|
|
|
+ if (domain->ops->unmap)
|
|
|
+ domain->ops->unmap(domain, irq);
|
|
|
+ smp_mb();
|
|
|
|
|
|
- irq_data->domain = NULL;
|
|
|
- irq_data->hwirq = 0;
|
|
|
+ irq_data->domain = NULL;
|
|
|
+ irq_data->hwirq = 0;
|
|
|
|
|
|
- /* Clear reverse map */
|
|
|
- switch(domain->revmap_type) {
|
|
|
- case IRQ_DOMAIN_MAP_LINEAR:
|
|
|
- if (hwirq < domain->revmap_data.linear.size)
|
|
|
- domain->revmap_data.linear.revmap[hwirq] = 0;
|
|
|
- break;
|
|
|
- case IRQ_DOMAIN_MAP_TREE:
|
|
|
- mutex_lock(&revmap_trees_mutex);
|
|
|
- radix_tree_delete(&domain->revmap_data.tree, hwirq);
|
|
|
- mutex_unlock(&revmap_trees_mutex);
|
|
|
- break;
|
|
|
- }
|
|
|
+ /* Clear reverse map for this hwirq */
|
|
|
+ if (hwirq < domain->revmap_size) {
|
|
|
+ domain->linear_revmap[hwirq] = 0;
|
|
|
+ } else {
|
|
|
+ mutex_lock(&revmap_trees_mutex);
|
|
|
+ radix_tree_delete(&domain->revmap_tree, hwirq);
|
|
|
+ mutex_unlock(&revmap_trees_mutex);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
|
|
|
- irq_hw_number_t hwirq_base, int count)
|
|
|
+int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
|
|
|
+ irq_hw_number_t hwirq)
|
|
|
{
|
|
|
- unsigned int virq = irq_base;
|
|
|
- irq_hw_number_t hwirq = hwirq_base;
|
|
|
- int i, ret;
|
|
|
+ struct irq_data *irq_data = irq_get_irq_data(virq);
|
|
|
+ int ret;
|
|
|
|
|
|
- pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
|
|
|
- of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
|
|
|
+ if (WARN(hwirq >= domain->hwirq_max,
|
|
|
+ "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
|
|
|
+ return -EINVAL;
|
|
|
+ if (WARN(!irq_data, "error: virq%i is not allocated", virq))
|
|
|
+ return -EINVAL;
|
|
|
+ if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
|
|
|
+ return -EINVAL;
|
|
|
|
|
|
- for (i = 0; i < count; i++) {
|
|
|
- struct irq_data *irq_data = irq_get_irq_data(virq + i);
|
|
|
-
|
|
|
- if (WARN(!irq_data, "error: irq_desc not allocated; "
|
|
|
- "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
|
|
|
- return -EINVAL;
|
|
|
- if (WARN(irq_data->domain, "error: irq_desc already associated; "
|
|
|
- "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
|
|
|
- return -EINVAL;
|
|
|
- };
|
|
|
-
|
|
|
- for (i = 0; i < count; i++, virq++, hwirq++) {
|
|
|
- struct irq_data *irq_data = irq_get_irq_data(virq);
|
|
|
-
|
|
|
- irq_data->hwirq = hwirq;
|
|
|
- irq_data->domain = domain;
|
|
|
- if (domain->ops->map) {
|
|
|
- ret = domain->ops->map(domain, virq, hwirq);
|
|
|
- if (ret != 0) {
|
|
|
- /*
|
|
|
- * If map() returns -EPERM, this interrupt is protected
|
|
|
- * by the firmware or some other service and shall not
|
|
|
- * be mapped.
|
|
|
- *
|
|
|
- * Since on some platforms we blindly try to map everything
|
|
|
- * we end up with a log full of backtraces.
|
|
|
- *
|
|
|
- * So instead, we silently fail on -EPERM, it is the
|
|
|
- * responsibility of the PIC driver to display a relevant
|
|
|
- * message if needed.
|
|
|
- */
|
|
|
- if (ret != -EPERM) {
|
|
|
- pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
|
|
|
- virq, hwirq, ret);
|
|
|
- WARN_ON(1);
|
|
|
- }
|
|
|
- irq_data->domain = NULL;
|
|
|
- irq_data->hwirq = 0;
|
|
|
- goto err_unmap;
|
|
|
+ mutex_lock(&irq_domain_mutex);
|
|
|
+ irq_data->hwirq = hwirq;
|
|
|
+ irq_data->domain = domain;
|
|
|
+ if (domain->ops->map) {
|
|
|
+ ret = domain->ops->map(domain, virq, hwirq);
|
|
|
+ if (ret != 0) {
|
|
|
+ /*
|
|
|
+ * If map() returns -EPERM, this interrupt is protected
|
|
|
+ * by the firmware or some other service and shall not
|
|
|
+ * be mapped. Don't bother telling the user about it.
|
|
|
+ */
|
|
|
+ if (ret != -EPERM) {
|
|
|
+ pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
|
|
|
+ domain->name, hwirq, virq, ret);
|
|
|
}
|
|
|
+ irq_data->domain = NULL;
|
|
|
+ irq_data->hwirq = 0;
|
|
|
+ mutex_unlock(&irq_domain_mutex);
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
- switch (domain->revmap_type) {
|
|
|
- case IRQ_DOMAIN_MAP_LINEAR:
|
|
|
- if (hwirq < domain->revmap_data.linear.size)
|
|
|
- domain->revmap_data.linear.revmap[hwirq] = virq;
|
|
|
- break;
|
|
|
- case IRQ_DOMAIN_MAP_TREE:
|
|
|
- mutex_lock(&revmap_trees_mutex);
|
|
|
- radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
|
|
|
- mutex_unlock(&revmap_trees_mutex);
|
|
|
- break;
|
|
|
- }
|
|
|
+ /* If not already assigned, give the domain the chip's name */
|
|
|
+ if (!domain->name && irq_data->chip)
|
|
|
+ domain->name = irq_data->chip->name;
|
|
|
+ }
|
|
|
|
|
|
- irq_clear_status_flags(virq, IRQ_NOREQUEST);
|
|
|
+ if (hwirq < domain->revmap_size) {
|
|
|
+ domain->linear_revmap[hwirq] = virq;
|
|
|
+ } else {
|
|
|
+ mutex_lock(&revmap_trees_mutex);
|
|
|
+ radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
|
|
|
+ mutex_unlock(&revmap_trees_mutex);
|
|
|
}
|
|
|
+ mutex_unlock(&irq_domain_mutex);
|
|
|
+
|
|
|
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
|
|
|
|
|
|
return 0;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(irq_domain_associate);
|
|
|
|
|
|
- err_unmap:
|
|
|
- irq_domain_disassociate_many(domain, irq_base, i);
|
|
|
- return -EINVAL;
|
|
|
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
|
|
|
+ irq_hw_number_t hwirq_base, int count)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
|
|
|
+ of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
|
|
|
+
|
|
|
+ for (i = 0; i < count; i++) {
|
|
|
+ irq_domain_associate(domain, irq_base + i, hwirq_base + i);
|
|
|
+ }
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
|
|
|
|
|
@@ -513,7 +341,9 @@ EXPORT_SYMBOL_GPL(irq_domain_associate_many);
|
|
|
*
|
|
|
* This routine is used for irq controllers which can choose the hardware
|
|
|
* interrupt numbers they generate. In such a case it's simplest to use
|
|
|
- * the linux irq as the hardware interrupt number.
|
|
|
+ * the linux irq as the hardware interrupt number. It still uses the linear
|
|
|
+ * or radix tree to store the mapping, but the irq controller can optimize
|
|
|
+ * the revmap path by using the hwirq directly.
|
|
|
*/
|
|
|
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
|
|
|
{
|
|
@@ -522,17 +352,14 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
|
|
|
if (domain == NULL)
|
|
|
domain = irq_default_domain;
|
|
|
|
|
|
- if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
|
|
|
- return 0;
|
|
|
-
|
|
|
virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
|
|
|
if (!virq) {
|
|
|
pr_debug("create_direct virq allocation failed\n");
|
|
|
return 0;
|
|
|
}
|
|
|
- if (virq >= domain->revmap_data.nomap.max_irq) {
|
|
|
+ if (virq >= domain->revmap_direct_max_irq) {
|
|
|
pr_err("ERROR: no free irqs available below %i maximum\n",
|
|
|
- domain->revmap_data.nomap.max_irq);
|
|
|
+ domain->revmap_direct_max_irq);
|
|
|
irq_free_desc(virq);
|
|
|
return 0;
|
|
|
}
|
|
@@ -569,9 +396,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
|
|
|
if (domain == NULL)
|
|
|
domain = irq_default_domain;
|
|
|
if (domain == NULL) {
|
|
|
- pr_warning("irq_create_mapping called for"
|
|
|
- " NULL domain, hwirq=%lx\n", hwirq);
|
|
|
- WARN_ON(1);
|
|
|
+ WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
|
|
|
return 0;
|
|
|
}
|
|
|
pr_debug("-> using domain @%p\n", domain);
|
|
@@ -583,10 +408,6 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
|
|
|
return virq;
|
|
|
}
|
|
|
|
|
|
- /* Get a virtual interrupt number */
|
|
|
- if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
|
|
|
- return irq_domain_legacy_revmap(domain, hwirq);
|
|
|
-
|
|
|
/* Allocate a virtual interrupt number */
|
|
|
hint = hwirq % nr_irqs;
|
|
|
if (hint == 0)
|
|
@@ -639,12 +460,7 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
|
|
|
if (unlikely(ret < 0))
|
|
|
return ret;
|
|
|
|
|
|
- ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
|
|
|
- if (unlikely(ret < 0)) {
|
|
|
- irq_free_descs(irq_base, count);
|
|
|
- return ret;
|
|
|
- }
|
|
|
-
|
|
|
+ irq_domain_associate_many(domain, irq_base, hwirq_base, count);
|
|
|
return 0;
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
|
|
@@ -671,8 +487,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
|
|
|
if (intsize > 0)
|
|
|
return intspec[0];
|
|
|
#endif
|
|
|
- pr_warning("no irq domain found for %s !\n",
|
|
|
- of_node_full_name(controller));
|
|
|
+ pr_warn("no irq domain found for %s !\n",
|
|
|
+ of_node_full_name(controller));
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -714,11 +530,7 @@ void irq_dispose_mapping(unsigned int virq)
|
|
|
if (WARN_ON(domain == NULL))
|
|
|
return;
|
|
|
|
|
|
- /* Never unmap legacy interrupts */
|
|
|
- if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
|
|
|
- return;
|
|
|
-
|
|
|
- irq_domain_disassociate_many(domain, virq, 1);
|
|
|
+ irq_domain_disassociate(domain, virq);
|
|
|
irq_free_desc(virq);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
|
|
@@ -739,63 +551,51 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
|
|
|
if (domain == NULL)
|
|
|
return 0;
|
|
|
|
|
|
- switch (domain->revmap_type) {
|
|
|
- case IRQ_DOMAIN_MAP_LEGACY:
|
|
|
- return irq_domain_legacy_revmap(domain, hwirq);
|
|
|
- case IRQ_DOMAIN_MAP_LINEAR:
|
|
|
- return irq_linear_revmap(domain, hwirq);
|
|
|
- case IRQ_DOMAIN_MAP_TREE:
|
|
|
- rcu_read_lock();
|
|
|
- data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
|
|
|
- rcu_read_unlock();
|
|
|
- if (data)
|
|
|
- return data->irq;
|
|
|
- break;
|
|
|
- case IRQ_DOMAIN_MAP_NOMAP:
|
|
|
+ if (hwirq < domain->revmap_direct_max_irq) {
|
|
|
data = irq_get_irq_data(hwirq);
|
|
|
if (data && (data->domain == domain) && (data->hwirq == hwirq))
|
|
|
return hwirq;
|
|
|
- break;
|
|
|
}
|
|
|
|
|
|
- return 0;
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(irq_find_mapping);
|
|
|
+ /* Check if the hwirq is in the linear revmap. */
|
|
|
+ if (hwirq < domain->revmap_size)
|
|
|
+ return domain->linear_revmap[hwirq];
|
|
|
|
|
|
-/**
|
|
|
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
|
|
|
- * @domain: domain owning this hardware interrupt
|
|
|
- * @hwirq: hardware irq number in that domain space
|
|
|
- *
|
|
|
- * This is a fast path that can be called directly by irq controller code to
|
|
|
- * save a handful of instructions.
|
|
|
- */
|
|
|
-unsigned int irq_linear_revmap(struct irq_domain *domain,
|
|
|
- irq_hw_number_t hwirq)
|
|
|
-{
|
|
|
- BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
|
|
|
-
|
|
|
- /* Check revmap bounds; complain if exceeded */
|
|
|
- if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
|
|
|
- return 0;
|
|
|
-
|
|
|
- return domain->revmap_data.linear.revmap[hwirq];
|
|
|
+ rcu_read_lock();
|
|
|
+ data = radix_tree_lookup(&domain->revmap_tree, hwirq);
|
|
|
+ rcu_read_unlock();
|
|
|
+ return data ? data->irq : 0;
|
|
|
}
|
|
|
-EXPORT_SYMBOL_GPL(irq_linear_revmap);
|
|
|
+EXPORT_SYMBOL_GPL(irq_find_mapping);
|
|
|
|
|
|
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
|
|
|
static int virq_debug_show(struct seq_file *m, void *private)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct irq_desc *desc;
|
|
|
- const char *p;
|
|
|
- static const char none[] = "none";
|
|
|
- void *data;
|
|
|
+ struct irq_domain *domain;
|
|
|
+ struct radix_tree_iter iter;
|
|
|
+ void *data, **slot;
|
|
|
int i;
|
|
|
|
|
|
- seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
|
|
|
+ seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
|
|
|
+ "name", "mapped", "linear-max", "direct-max", "devtree-node");
|
|
|
+ mutex_lock(&irq_domain_mutex);
|
|
|
+ list_for_each_entry(domain, &irq_domain_list, link) {
|
|
|
+ int count = 0;
|
|
|
+ radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
|
|
|
+ count++;
|
|
|
+ seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
|
|
|
+ domain == irq_default_domain ? '*' : ' ', domain->name,
|
|
|
+ domain->revmap_size + count, domain->revmap_size,
|
|
|
+ domain->revmap_direct_max_irq,
|
|
|
+ domain->of_node ? of_node_full_name(domain->of_node) : "");
|
|
|
+ }
|
|
|
+ mutex_unlock(&irq_domain_mutex);
|
|
|
+
|
|
|
+ seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
|
|
|
"chip name", (int)(2 * sizeof(void *) + 2), "chip data",
|
|
|
- "domain name");
|
|
|
+ "active", "type", "domain");
|
|
|
|
|
|
for (i = 1; i < nr_irqs; i++) {
|
|
|
desc = irq_to_desc(i);
|
|
@@ -803,28 +603,28 @@ static int virq_debug_show(struct seq_file *m, void *private)
|
|
|
continue;
|
|
|
|
|
|
raw_spin_lock_irqsave(&desc->lock, flags);
|
|
|
+ domain = desc->irq_data.domain;
|
|
|
|
|
|
- if (desc->action && desc->action->handler) {
|
|
|
+ if (domain) {
|
|
|
struct irq_chip *chip;
|
|
|
+ int hwirq = desc->irq_data.hwirq;
|
|
|
+ bool direct;
|
|
|
|
|
|
seq_printf(m, "%5d ", i);
|
|
|
- seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
|
|
|
+ seq_printf(m, "0x%05x ", hwirq);
|
|
|
|
|
|
chip = irq_desc_get_chip(desc);
|
|
|
- if (chip && chip->name)
|
|
|
- p = chip->name;
|
|
|
- else
|
|
|
- p = none;
|
|
|
- seq_printf(m, "%-15s ", p);
|
|
|
+ seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
|
|
|
|
|
|
data = irq_desc_get_chip_data(desc);
|
|
|
seq_printf(m, data ? "0x%p " : " %p ", data);
|
|
|
|
|
|
- if (desc->irq_data.domain)
|
|
|
- p = of_node_full_name(desc->irq_data.domain->of_node);
|
|
|
- else
|
|
|
- p = none;
|
|
|
- seq_printf(m, "%s\n", p);
|
|
|
+ seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
|
|
|
+ direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
|
|
|
+ seq_printf(m, "%6s%-8s ",
|
|
|
+ (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
|
|
|
+ direct ? "(DIRECT)" : "");
|
|
|
+ seq_printf(m, "%s\n", desc->irq_data.domain->name);
|
|
|
}
|
|
|
|
|
|
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
|
@@ -921,18 +721,3 @@ const struct irq_domain_ops irq_domain_simple_ops = {
|
|
|
.xlate = irq_domain_xlate_onetwocell,
|
|
|
};
|
|
|
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
|
|
|
-
|
|
|
-#ifdef CONFIG_OF_IRQ
|
|
|
-void irq_domain_generate_simple(const struct of_device_id *match,
|
|
|
- u64 phys_base, unsigned int irq_start)
|
|
|
-{
|
|
|
- struct device_node *node;
|
|
|
- pr_debug("looking for phys_base=%llx, irq_start=%i\n",
|
|
|
- (unsigned long long) phys_base, (int) irq_start);
|
|
|
- node = of_find_matching_node_by_address(NULL, match, phys_base);
|
|
|
- if (node)
|
|
|
- irq_domain_add_legacy(node, 32, irq_start, 0,
|
|
|
- &irq_domain_simple_ops, NULL);
|
|
|
-}
|
|
|
-EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
|
|
|
-#endif
|