irq.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized to hold just a single sentinel interrupt that
 * the kernel never actually uses; during kernel init, interrupts are
 * added as the kernel becomes prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
        INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
        ____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure that IRQs enabled
 * from interrupt context are not unmasked until the outermost
 * interrupt exits.
 */
static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
        (~(1UL << IRQ_RESCHEDULE));
static DEFINE_SPINLOCK(available_irqs_lock);
#endif
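
/*
 * Worked example with illustrative values only: if NR_IRQS were 8 and
 * IRQ_RESCHEDULE were 1, the initializer above would evaluate to
 * 0xff & ~0x02 = 0xfd, i.e. every irq available for allocation except
 * the bit reserved for the reschedule IPI.
 */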

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
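
/*
 * For example, mask_irqs(1UL << 5) expands on TILE-Gx to
 * __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << 5), while on TILEPro the
 * same call becomes hv_disable_intr(1UL << 5).
 */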

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
        int depth = __get_cpu_var(irq_depth)++;
        unsigned long original_irqs;
        unsigned long remaining_irqs;
        struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
        /*
         * Pending interrupts are listed in an SPR.  We might be
         * nested, so be sure to only handle irqs that weren't already
         * masked by a previous interrupt.  Then, mask out the ones
         * we're going to handle.
         */
        unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
        original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
        __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
        /*
         * Hypervisor performs the equivalent of the Gx code above and
         * then puts the pending interrupt mask into a system save reg
         * for us to find.
         */
        original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
        remaining_irqs = original_irqs;

        /* Track time spent here in an interrupt context. */
        old_regs = set_irq_regs(regs);
        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: less than 1/8th stack free? */
        {
                long sp = stack_pointer - (long) current_thread_info();
                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        pr_emerg("tile_dev_intr: stack overflow: %ld\n",
                                 sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif
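
        /*
         * Dispatch each pending irq, lowest bit first: e.g., if
         * original_irqs is 0x22, irq 1 is handled and then irq 5.
         */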
        while (remaining_irqs) {
                unsigned long irq = __ffs(remaining_irqs);
                remaining_irqs &= ~(1UL << irq);

                /* Count device irqs; Linux IPIs are counted elsewhere. */
                if (irq != IRQ_RESCHEDULE)
                        __get_cpu_var(irq_stat).irq_dev_intr_count++;

                generic_handle_irq(irq);
        }

        /*
         * If we weren't nested, turn on all enabled interrupts,
         * including any that were reenabled during interrupt
         * handling.
         */
        if (depth == 0)
                unmask_irqs(~__get_cpu_var(irq_disable_mask));

        __get_cpu_var(irq_depth)--;

        /*
         * Track time spent against the current process again and
         * process any softirqs if they are waiting.
         */
        irq_exit();
        set_irq_regs(old_regs);
}

/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
                unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
        mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
        mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
        unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
        if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
                clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
        if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
                unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
        .irq_enable = tile_irq_chip_enable,
        .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
        .irq_unmask = tile_irq_chip_unmask,
};
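
/*
 * A rough sketch of how the generic-irq core of this kernel
 * generation drives the callbacks above (see kernel/irq/chip.c):
 * handle_level_irq() masks and acks the irq, runs the handler, then
 * unmasks it unless it was disabled meanwhile; handle_percpu_irq()
 * acks, runs the handler, then calls eoi, which is why
 * tile_irq_chip_eoi() consults irq_disable_mask before unmasking.
 */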

void __init init_IRQ(void)
{
        ipi_init();
}

void setup_irq_regs(void)
{
        /* Enable interrupt delivery. */
        unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
        arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
        /*
         * We use handle_level_irq() by default because the pending
         * interrupt vector (whether modeled by the HV on TILEPro or
         * implemented in hardware on TILE-Gx) has level-style
         * semantics for each bit.  An interrupt fires whenever a bit
         * is high, not just at edges.
         */
        irq_flow_handler_t handle = handle_level_irq;
        if (tile_irq_type == TILE_IRQ_PERCPU)
                handle = handle_percpu_irq;
        irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

        /*
         * Flag interrupts that are hardware-cleared so that ack()
         * won't clear them.
         */
        if (tile_irq_type == TILE_IRQ_HW_CLEAR)
                irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
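
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * activate the irq with the appropriate type, then install a handler
 * through the usual request_irq() path.  my_handler, my_dev, and dev
 * are illustrative names only.
 *
 *      tile_irq_activate(irq, TILE_IRQ_HW_CLEAR);
 *      if (request_irq(irq, my_handler, 0, "my_dev", dev) != 0)
 *              pr_err("my_dev: request_irq(%u) failed\n", irq);
 */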

void ack_bad_irq(unsigned int irq)
{
        pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * Generic, controller-independent functions:
 */

#if CHIP_HAS_IPI()
int create_irq(void)
{
        unsigned long flags;
        int result;

        spin_lock_irqsave(&available_irqs_lock, flags);
        if (available_irqs == 0)
                result = -ENOMEM;
        else {
                result = __ffs(available_irqs);
                available_irqs &= ~(1UL << result);
                dynamic_irq_init(result);
        }
        spin_unlock_irqrestore(&available_irqs_lock, flags);

        return result;
}
EXPORT_SYMBOL(create_irq);

void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&available_irqs_lock, flags);
        available_irqs |= (1UL << irq);
        dynamic_irq_cleanup(irq);
        spin_unlock_irqrestore(&available_irqs_lock, flags);
}
EXPORT_SYMBOL(destroy_irq);
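
/*
 * Lifecycle sketch for a dynamically allocated Gx irq (hypothetical
 * caller, not part of this file):
 *
 *      int irq = create_irq();
 *      if (irq < 0)
 *              return irq;
 *      tile_irq_activate(irq, TILE_IRQ_HW_CLEAR);
 *      ...request and use the irq...
 *      destroy_irq(irq);
 */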
#endif