@@ -9,8 +9,10 @@
  * kind, whether express or implied.
  */
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/io.h>
 
 #include <mach/common.h>
 #include <mach/cp_intc.h>
@@ -28,7 +30,7 @@ static inline void cp_intc_write(unsigned long value, unsigned offset)
 
 static void cp_intc_ack_irq(struct irq_data *d)
 {
-	cp_intc_write(d->irq, CP_INTC_SYS_STAT_IDX_CLR);
+	cp_intc_write(d->hwirq, CP_INTC_SYS_STAT_IDX_CLR);
 }
 
 /* Disable interrupt */
@@ -36,20 +38,20 @@ static void cp_intc_mask_irq(struct irq_data *d)
 {
 	/* XXX don't know why we need to disable nIRQ here... */
 	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
-	cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_CLR);
+	cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_CLR);
 	cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
 }
 
 /* Enable interrupt */
 static void cp_intc_unmask_irq(struct irq_data *d)
 {
-	cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_SET);
+	cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_SET);
 }
 
 static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	unsigned reg		= BIT_WORD(d->irq);
-	unsigned mask		= BIT_MASK(d->irq);
+	unsigned reg		= BIT_WORD(d->hwirq);
+	unsigned mask		= BIT_MASK(d->hwirq);
 	unsigned polarity	= cp_intc_read(CP_INTC_SYS_POLARITY(reg));
 	unsigned type		= cp_intc_read(CP_INTC_SYS_TYPE(reg));
 
@@ -99,18 +101,36 @@ static struct irq_chip cp_intc_irq_chip = {
 	.irq_set_wake	= cp_intc_set_wake,
 };
 
-void __init cp_intc_init(void)
+static struct irq_domain *cp_intc_domain;
+
+static int cp_intc_host_map(struct irq_domain *h, unsigned int virq,
+			    irq_hw_number_t hw)
+{
+	pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
+
+	irq_set_chip(virq, &cp_intc_irq_chip);
+	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+	irq_set_handler(virq, handle_edge_irq);
+	return 0;
+}
+
+static const struct irq_domain_ops cp_intc_host_ops = {
+	.map = cp_intc_host_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+int __init __cp_intc_init(struct device_node *node)
 {
-	unsigned long num_irq	= davinci_soc_info.intc_irq_num;
+	u32 num_irq		= davinci_soc_info.intc_irq_num;
 	u8 *irq_prio		= davinci_soc_info.intc_irq_prios;
 	u32 *host_map		= davinci_soc_info.intc_host_map;
 	unsigned num_reg	= BITS_TO_LONGS(num_irq);
-	int i;
+	int i, irq_base;
 
 	davinci_intc_type = DAVINCI_INTC_TYPE_CP_INTC;
 	davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K);
 	if (WARN_ON(!davinci_intc_base))
-		return;
+		return -EINVAL;
 
 	cp_intc_write(0, CP_INTC_GLOBAL_ENABLE);
 
@@ -165,13 +185,28 @@ void __init cp_intc_init(void)
 	for (i = 0; host_map[i] != -1; i++)
 		cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i));
 
-	/* Set up genirq dispatching for cp_intc */
-	for (i = 0; i < num_irq; i++) {
-		irq_set_chip(i, &cp_intc_irq_chip);
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
-		irq_set_handler(i, handle_edge_irq);
+	irq_base = irq_alloc_descs(-1, 0, num_irq, 0);
+	if (irq_base < 0) {
+		pr_warn("Couldn't allocate IRQ numbers\n");
+		irq_base = 0;
+	}
+
+	/* create a legacy host */
+	cp_intc_domain = irq_domain_add_legacy(node, num_irq,
+					irq_base, 0, &cp_intc_host_ops, NULL);
+
+	if (!cp_intc_domain) {
+		pr_err("cp_intc: failed to allocate irq host!\n");
+		return -EINVAL;
 	}
 
 	/* Enable global interrupt */
 	cp_intc_write(1, CP_INTC_GLOBAL_ENABLE);
+
+	return 0;
+}
+
+void __init cp_intc_init(void)
+{
+	__cp_intc_init(NULL);
 }