/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *               D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *               Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>
.extern _ret_from_exception

#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4        /* just in case */
/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.
 */
__common_int_entry:
        [--sp] = fp;
        [--sp] = usp;

        [--sp] = i0;
        [--sp] = i1;
        [--sp] = i2;
        [--sp] = i3;

        [--sp] = m0;
        [--sp] = m1;
        [--sp] = m2;
        [--sp] = m3;

        [--sp] = l0;
        [--sp] = l1;
        [--sp] = l2;
        [--sp] = l3;

        [--sp] = b0;
        [--sp] = b1;
        [--sp] = b2;
        [--sp] = b3;
        [--sp] = a0.x;
        [--sp] = a0.w;
        [--sp] = a1.x;
        [--sp] = a1.w;

        [--sp] = LC0;
        [--sp] = LC1;
        [--sp] = LT0;
        [--sp] = LT1;
        [--sp] = LB0;
        [--sp] = LB1;

        [--sp] = ASTAT;

        [--sp] = r0;    /* Skip reserved */
        [--sp] = RETS;
        r2 = RETI;
        [--sp] = r2;
        [--sp] = RETX;
        [--sp] = RETN;
        [--sp] = RETE;
        [--sp] = SEQSTAT;
        [--sp] = r1;    /* IPEND - R1 may or may not be set up before jumping here. */
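        /* The pushes above fill in the rest of a struct pt_regs frame; the
         * scratch state (SYSCFG, orig_p0, orig_r0, R7:0, P5:0) is assumed to
         * have been pushed already by the per-event entry stubs.  The PT_*
         * offsets from asm-offsets.h (e.g. PT_IPEND, PT_SEQSTAT below) index
         * into this frame. */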
        /* Switch to other method of keeping interrupts disabled. */
#ifdef CONFIG_DEBUG_HWERR
        r1 = 0x3f;
        sti r1;
#else
        cli r1;
#endif
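        /* With CONFIG_DEBUG_HWERR, IMASK gets 0x3f (the low six event bits:
         * EVT_EMU | EVT_RST | EVT_NMI | EVT_EVX | EVT_IRPTEN | EVT_IVHW), so
         * hardware errors stay deliverable while every peripheral IVG level
         * is masked; a plain CLI masks those as well. */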
        [--sp] = RETI;  /* orig_pc */
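        /* Pushing RETI clears the global interrupt-disable bit (IPEND[4]) and
         * re-enables nesting at the core level; interrupts are now blocked
         * purely through IMASK, which is the state the CLI/STI above set up. */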
        /* Clear all L registers. */
        r1 = 0 (x);
        l0 = r1;
        l1 = r1;
        l2 = r1;
        l3 = r1;
#ifdef CONFIG_FRAME_POINTER
        fp = 0;
#endif

        ANOMALY_283_315_WORKAROUND(p5, r7)
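        /* Assumption from the macro name: ANOMALY_283_315_WORKAROUND papers
         * over silicon anomalies 05000283/05000315, clobbering P5/R7 as
         * scratch, which is why it runs only after those registers are saved. */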
        r1 = sp;
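        /* R1 carries the pt_regs pointer as the second C argument; the
         * 12-byte SP adjustment below reserves the outgoing-argument area
         * the Blackfin C calling convention expects from callers. */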
        SP += -12;
#ifdef CONFIG_IPIPE
        call ___ipipe_grab_irq
        SP += 12;
        cc = r0 == 0;
        if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
        call _do_irq;
        SP += 12;
#endif /* CONFIG_IPIPE */
        call _return_from_int;
.Lcommon_restore_context:
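        /* RESTORE_CONTEXT (from asm/context.S) unwinds the frame built above;
         * popping RETI last sets IPEND[4] again, so nesting is disabled right
         * before the RTI returns to the interrupted context. */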
        RESTORE_CONTEXT
        rti;
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
        /* A single action can kick off multiple memory transactions (such as
         * a cache line fetch), which in turn can cause multiple hardware
         * errors, so catch them all.  First make sure all the actions are
         * complete and the core sees the errors.
         */
        SSYNC;
        SSYNC;

        SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
        fp = 0;
#endif

        ANOMALY_283_315_WORKAROUND(p5, r7)

        /* Handle all stacked hardware errors.
         * To make sure we don't hang forever, only do it 10 times.
         */
        R0 = 0;         /* retry counter */
        R2 = 10;        /* retry limit */
1:
        P0.L = LO(ILAT);
        P0.H = HI(ILAT);
        R1 = [P0];
        CC = BITTST(R1, EVT_IVHW_P);
        IF !CC JUMP 2f;
        /* OK, a hardware error is pending - write the IVHW mask bit back to
         * ILAT to clear it */
        R1 = EVT_IVHW;
        [P0] = R1;
        R0 += 1;
        CC = R0 == R2;  /* give up after 10 passes */
        IF CC JUMP 2f;
        JUMP 1b;
2:
        # We are going to dump something out, so make sure we print IPEND properly
        p2.l = lo(IPEND);
        p2.h = hi(IPEND);
        r0 = [p2];
        [sp + PT_IPEND] = r0;

        /* set the EXCAUSE to HWERR for trap_c */
        r0 = [sp + PT_SEQSTAT];
        R1.L = LO(VEC_HWERR);
        R1.H = HI(VEC_HWERR);
        R0 = R0 | R1;
        [sp + PT_SEQSTAT] = R0;
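        /* trap_c() dispatches on the EXCAUSE field of the saved SEQSTAT, so
         * OR-ing VEC_HWERR into the frame above makes it handle this event
         * as a hardware error. */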
        r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
        SP += -12;
        call _trap_c;
        SP += 12;

#ifdef EBIU_ERRMST
        /* make sure EBIU_ERRMST is clear */
        p0.l = LO(EBIU_ERRMST);
        p0.h = HI(EBIU_ERRMST);
        r0.l = (CORE_ERROR | CORE_MERROR);
        w[p0] = r0.l;
#endif
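        /* Assuming the usual write-1-to-clear MMR semantics here: acking
         * CORE_ERROR/CORE_MERROR in EBIU_ERRMST keeps a stale async-memory
         * error from re-raising the hardware error as soon as we return. */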
        call _ret_from_exception;

.Lcommon_restore_all_sys:
        RESTORE_ALL_SYS
        rti;
ENDPROC(_evt_ivhw)
/* Interrupt routine for evt2 (NMI).
 * We don't actually use this, so just return.
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
.weak _evt_nmi
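        /* RTN returns through RETN, the dedicated return register for the
         * NMI event level. */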
        rtn;
ENDPROC(_evt_nmi)
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
        TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
        INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
        INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
        INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
        INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
        INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
        INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
        INTERRUPT_ENTRY(EVT_IVG13_P)
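/* A rough sketch of what each stub above expands to, assuming the
 * INTERRUPT_ENTRY definition in asm/entry.h: push the scratch state, load
 * the event number into R0, and fall into the common path, roughly:
 *
 *      [--sp] = SYSCFG;
 *      [--sp] = P0;            // orig_p0
 *      [--sp] = R0;            // orig_r0
 *      [--sp] = (R7:0, P5:0);
 *      R0 = (EVT_IVGn_P);
 *      jump __common_int_entry;
 */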
/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
        SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
        fp = 0;
#endif
        call _system_call;
        jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this facility on two occasions:
 *
 * - to branch to __ipipe_irq_tail_hook as requested by a high-priority
 * domain (such as Xenomai) after the pipeline has delivered an
 * interrupt, in order to start its rescheduling procedure: since we
 * may not switch tasks when IRQ levels are nested on the Blackfin,
 * we have to fake an interrupt return so that we may reschedule
 * immediately.
 *
 * - to branch to sync_root_irqs, in order to replay any interrupts
 * pending for the root domain (i.e. the Linux kernel). This lowers
 * the core priority level enough that Linux IRQ handlers never delay
 * interrupts handled by high-priority domains; we defer those
 * handlers until this point instead. This is a substitute for using
 * a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
        p0 = r0;
        r0.l = 1f;
        r0.h = 1f;
        reti = r0;
        rti;
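        /* The RTI above "returns" to the 1: label below: with RETI aimed
         * there, the core drops out of the current event level, so the code
         * that follows runs in the lower-priority EVT15 context described in
         * the comment above. */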
1:
        [--sp] = rets;
        [--sp] = ( r7:4, p5:3 );
        sp += -12;
        call (p0);
        sp += 12;
        ( r7:4, p5:3 ) = [sp++];
        rets = [sp++];
#ifdef CONFIG_DEBUG_HWERR
        /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
        r0 = (EVT_IVG14 | EVT_IVHW | \
              EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
        /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
        r0 = (EVT_IVG14 | \
              EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
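        /* EVT_IRPTEN is the global interrupt-enable bit; with essentially
         * only IVG14 unmasked, the RAISE below is taken as soon as STI
         * writes IMASK. */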
        sti r0;
        raise 14;       /* Branches to _evt_evt14 */
2:
        jump 2b;        /* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */