/*
 * linux/arch/xtensa/kernel/irq.c
 *
 * Xtensa built-in interrupt controller and some generic functions copied
 * from i386.
 *
 * Copyright (C) 2002 - 2006 Tensilica, Inc.
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <asm/uaccess.h>
#include <asm/platform.h>

/*
 * Shadow of the value last written to the INTENABLE special register;
 * updated by xtensa_irq_mask()/xtensa_irq_unmask() below so the register
 * never needs to be read back.
 */
static unsigned int cached_irq_mask;

/* Spurious/unhandled interrupt count, reported as "ERR:" in /proc/interrupts. */
atomic_t irq_err_count;
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
  29. asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
  30. {
  31. struct pt_regs *old_regs = set_irq_regs(regs);
  32. struct irq_desc *desc = irq_desc + irq;
  33. if (irq >= NR_IRQS) {
  34. printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
  35. __func__, irq);
  36. }
  37. irq_enter();
  38. #ifdef CONFIG_DEBUG_STACKOVERFLOW
  39. /* Debugging check for stack overflow: is there less than 1KB free? */
  40. {
  41. unsigned long sp;
  42. __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
  43. sp &= THREAD_SIZE - 1;
  44. if (unlikely(sp < (sizeof(thread_info) + 1024)))
  45. printk("Stack overflow in do_IRQ: %ld\n",
  46. sp - sizeof(struct thread_info));
  47. }
  48. #endif
  49. desc->handle_irq(irq, desc);
  50. irq_exit();
  51. set_irq_regs(old_regs);
  52. }
/*
 * Generic, controller-independent functions:
 */
  56. int show_interrupts(struct seq_file *p, void *v)
  57. {
  58. int i = *(loff_t *) v, j;
  59. struct irqaction * action;
  60. unsigned long flags;
  61. if (i == 0) {
  62. seq_printf(p, " ");
  63. for_each_online_cpu(j)
  64. seq_printf(p, "CPU%d ",j);
  65. seq_putc(p, '\n');
  66. }
  67. if (i < NR_IRQS) {
  68. raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
  69. action = irq_desc[i].action;
  70. if (!action)
  71. goto skip;
  72. seq_printf(p, "%3d: ",i);
  73. #ifndef CONFIG_SMP
  74. seq_printf(p, "%10u ", kstat_irqs(i));
  75. #else
  76. for_each_online_cpu(j)
  77. seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
  78. #endif
  79. seq_printf(p, " %14s", irq_desc[i].chip->name);
  80. seq_printf(p, " %s", action->name);
  81. for (action=action->next; action; action = action->next)
  82. seq_printf(p, ", %s", action->name);
  83. seq_putc(p, '\n');
  84. skip:
  85. raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
  86. } else if (i == NR_IRQS) {
  87. seq_printf(p, "NMI: ");
  88. for_each_online_cpu(j)
  89. seq_printf(p, "%10u ", nmi_count(j));
  90. seq_putc(p, '\n');
  91. seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
  92. }
  93. return 0;
  94. }
  95. static void xtensa_irq_mask(unsigned int irq)
  96. {
  97. cached_irq_mask &= ~(1 << irq);
  98. set_sr (cached_irq_mask, INTENABLE);
  99. }
  100. static void xtensa_irq_unmask(unsigned int irq)
  101. {
  102. cached_irq_mask |= 1 << irq;
  103. set_sr (cached_irq_mask, INTENABLE);
  104. }
/*
 * Enable 'irq': let the platform variant do its board-specific enabling
 * first, then unmask the interrupt in the core's INTENABLE register.
 */
static void xtensa_irq_enable(unsigned int irq)
{
	variant_irq_enable(irq);
	xtensa_irq_unmask(irq);
}
/*
 * Disable 'irq': mask it in INTENABLE first, then let the platform
 * variant do its board-specific disabling (reverse of the enable order).
 */
static void xtensa_irq_disable(unsigned int irq)
{
	xtensa_irq_mask(irq);
	variant_irq_disable(irq);
}
/* Acknowledge 'irq' by clearing its pending bit via the INTCLEAR register. */
static void xtensa_irq_ack(unsigned int irq)
{
	set_sr(1 << irq, INTCLEAR);
}
/*
 * Re-trigger 'irq' by setting its bit via the INTSET register so the
 * interrupt fires again.  Returns 1 to tell the genirq core that the
 * retrigger was performed in hardware.
 */
static int xtensa_irq_retrigger(unsigned int irq)
{
	set_sr (1 << irq, INTSET);
	return 1;
}
/* irq_chip callbacks for the Xtensa built-in interrupt controller. */
static struct irq_chip xtensa_irq_chip = {
	.name		= "xtensa",
	.enable		= xtensa_irq_enable,
	.disable	= xtensa_irq_disable,
	.mask		= xtensa_irq_mask,
	.unmask		= xtensa_irq_unmask,
	.ack		= xtensa_irq_ack,
	.retrigger	= xtensa_irq_retrigger,
};
  133. void __init init_IRQ(void)
  134. {
  135. int index;
  136. for (index = 0; index < XTENSA_NR_IRQS; index++) {
  137. int mask = 1 << index;
  138. if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
  139. set_irq_chip_and_handler(index, &xtensa_irq_chip,
  140. handle_simple_irq);
  141. else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
  142. set_irq_chip_and_handler(index, &xtensa_irq_chip,
  143. handle_edge_irq);
  144. else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
  145. set_irq_chip_and_handler(index, &xtensa_irq_chip,
  146. handle_level_irq);
  147. else if (mask & XCHAL_INTTYPE_MASK_TIMER)
  148. set_irq_chip_and_handler(index, &xtensa_irq_chip,
  149. handle_edge_irq);
  150. else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
  151. /* XCHAL_INTTYPE_MASK_NMI */
  152. set_irq_chip_and_handler(index, &xtensa_irq_chip,
  153. handle_level_irq);
  154. }
  155. cached_irq_mask = 0;
  156. variant_init_irq();
  157. }