/*
 * linux/arch/xtensa/kernel/irq.c
 *
 * Xtensa built-in interrupt controller and some generic functions copied
 * from i386.
 *
 * Copyright (C) 2002 - 2006 Tensilica, Inc.
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>

#include <asm/uaccess.h>
#include <asm/platform.h>
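/*
 * cached_irq_mask shadows the INTENABLE special register: the
 * mask/unmask callbacks below update this software copy and write it
 * back with set_sr(), so the hardware register never has to be read.
 */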
static unsigned int cached_irq_mask;

atomic_t irq_err_count;
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (irq >= NR_IRQS) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
                       __func__, irq);
        }

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
                unsigned long sp;

                /* a1 is the stack pointer on xtensa; reduce it to the
                 * offset within the THREAD_SIZE-aligned kernel stack. */
                __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
                sp &= THREAD_SIZE - 1;

                if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
                        printk("Stack overflow in do_IRQ: %ld\n",
                               sp - sizeof(struct thread_info));
        }
#endif
        generic_handle_irq(irq);

        irq_exit();
        set_irq_regs(old_regs);
}
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
                seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
        }
        return 0;
}
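/*
 * The built-in xtensa interrupt controller is driven entirely through
 * special registers: INTENABLE gates interrupt delivery, INTCLEAR
 * acknowledges (clears) a pending interrupt bit, and INTSET re-posts
 * one, which is how retrigger is implemented below.
 */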
static void xtensa_irq_mask(struct irq_data *d)
{
        cached_irq_mask &= ~(1 << d->irq);
        set_sr(cached_irq_mask, INTENABLE);
}

static void xtensa_irq_unmask(struct irq_data *d)
{
        cached_irq_mask |= 1 << d->irq;
        set_sr(cached_irq_mask, INTENABLE);
}

static void xtensa_irq_enable(struct irq_data *d)
{
        variant_irq_enable(d->irq);
        xtensa_irq_unmask(d);
}

static void xtensa_irq_disable(struct irq_data *d)
{
        xtensa_irq_mask(d);
        variant_irq_disable(d->irq);
}

static void xtensa_irq_ack(struct irq_data *d)
{
        set_sr(1 << d->irq, INTCLEAR);
}

static int xtensa_irq_retrigger(struct irq_data *d)
{
        set_sr(1 << d->irq, INTSET);
        return 1;
}
static struct irq_chip xtensa_irq_chip = {
        .name           = "xtensa",
        .irq_enable     = xtensa_irq_enable,
        .irq_disable    = xtensa_irq_disable,
        .irq_mask       = xtensa_irq_mask,
        .irq_unmask     = xtensa_irq_unmask,
        .irq_ack        = xtensa_irq_ack,
        .irq_retrigger  = xtensa_irq_retrigger,
};
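/*
 * init_IRQ walks the core interrupt numbers and picks a flow handler
 * for each based on its hardware type (the XCHAL_INTTYPE_MASK_* bits
 * from the core configuration): simple for software interrupts, edge
 * for external-edge and timer interrupts, and level for external-level
 * and the remaining (write-error/NMI) types. variant_init_irq() is
 * called last for any variant-specific interrupt setup.
 */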
void __init init_IRQ(void)
{
        int index;

        for (index = 0; index < XTENSA_NR_IRQS; index++) {
                int mask = 1 << index;

                if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
                        irq_set_chip_and_handler(index, &xtensa_irq_chip,
                                                 handle_simple_irq);

                else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
                        irq_set_chip_and_handler(index, &xtensa_irq_chip,
                                                 handle_edge_irq);

                else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
                        irq_set_chip_and_handler(index, &xtensa_irq_chip,
                                                 handle_level_irq);

                else if (mask & XCHAL_INTTYPE_MASK_TIMER)
                        irq_set_chip_and_handler(index, &xtensa_irq_chip,
                                                 handle_edge_irq);

                else    /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
                        /* XCHAL_INTTYPE_MASK_NMI */
                        irq_set_chip_and_handler(index, &xtensa_irq_chip,
                                                 handle_level_irq);
        }

        cached_irq_mask = 0;

        variant_init_irq();
}