/* irqflags.h */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
  7. * Copyright (C) 1996 by Paul M. Antoine
  8. * Copyright (C) 1999 Silicon Graphics
  9. * Copyright (C) 2000 MIPS Technologies, Inc.
  10. */
  11. #ifndef _ASM_IRQFLAGS_H
  12. #define _ASM_IRQFLAGS_H
  13. #ifndef __ASSEMBLY__
  14. #include <linux/compiler.h>
  15. #include <asm/hazards.h>
/*
 * raw_local_irq_enable -- defined as a true assembler macro (rather than
 * plain inline asm) so the identical sequence can also be expanded from
 * assembly sources; the C inline wrapper below simply invokes it.
 * The macro uses $1 (at) as scratch, hence ".set noat".
 */
__asm__(
	" .macro raw_local_irq_enable \n"
	" .set push \n"
	" .set reorder \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* SMTC: interrupts are masked per thread context via TCStatus.IXMT (bit 10) */
	" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
	" ori $1, 0x400 \n"
	" xori $1, 0x400 \n"
	" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* MIPS32/64 R2 has a dedicated enable-interrupts instruction */
	" ei \n"
#else
	/*
	 * Classic MIPS: the ori/xori pair sets Status.IE (bit 0) while
	 * forcing bits 1-4 (EXL/ERL/KSU) clear in the same operation.
	 */
	" mfc0 $1,$12 \n"
	" ori $1,0x1f \n"
	" xori $1,0x1e \n"
	" mtc0 $1,$12 \n"
#endif
	" irq_enable_hazard \n"
	" .set pop \n"
	" .endm");
/*
 * raw_local_irq_enable() - enable hardware interrupts on the local CPU
 * (or, under SMTC, for the current thread context).
 *
 * Expands the assembler macro defined above.  The "memory" clobber acts
 * as a compiler barrier so memory accesses are not moved across the
 * interrupt-state change.
 */
static inline void raw_local_irq_enable(void)
{
	__asm__ __volatile__(
		"raw_local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
  45. /*
  46. * For cli() we have to insert nops to make sure that the new value
  47. * has actually arrived in the status register before the end of this
  48. * macro.
  49. * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
  50. * no nops at all.
  51. */
  52. /*
  53. * For TX49, operating only IE bit is not enough.
  54. *
  55. * If mfc0 $12 follows store and the mfc0 is last instruction of a
  56. * page and fetching the next instruction causes TLB miss, the result
  57. * of the mfc0 might wrongly contain EXL bit.
  58. *
  59. * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
  60. *
  61. * Workaround: mask EXL bit of the result or place a nop before mfc0.
  62. */
/*
 * raw_local_irq_disable -- assembler-macro counterpart of
 * raw_local_irq_enable above.  Note the switch to ".set noreorder"
 * immediately before the final mtc0: this keeps the assembler from
 * reordering it, so irq_disable_hazard directly follows the write
 * (see the TX49 erratum comment above for why ordering matters here).
 */
__asm__(
	" .macro raw_local_irq_disable\n"
	" .set push \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* SMTC: set TCStatus.IXMT (bit 10) to mask interrupts for this TC */
	" mfc0 $1, $2, 1 \n"
	" ori $1, 0x400 \n"
	" .set noreorder \n"
	" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* MIPS32/64 R2 dedicated disable-interrupts instruction */
	" di \n"
#else
	/*
	 * Classic MIPS: ori sets bits 0-4 of Status, xori then flips the
	 * very same bits back to zero -- clearing IE (and bits 1-4) without
	 * disturbing the rest of the register.
	 */
	" mfc0 $1,$12 \n"
	" ori $1,0x1f \n"
	" xori $1,0x1f \n"
	" .set noreorder \n"
	" mtc0 $1,$12 \n"
#endif
	" irq_disable_hazard \n"
	" .set pop \n"
	" .endm \n");
/*
 * raw_local_irq_disable() - disable hardware interrupts on the local CPU
 * (or, under SMTC, for the current thread context).
 *
 * Expands the assembler macro defined above; "memory" clobber serves as
 * a compiler barrier.
 */
static inline void raw_local_irq_disable(void)
{
	__asm__ __volatile__(
		"raw_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
/*
 * raw_local_save_flags \flags -- read the current interrupt state into
 * \flags without modifying it: TCStatus (CP0 $2, sel 1) under SMTC,
 * otherwise the whole CP0 Status register ($12).
 */
__asm__(
	" .macro raw_local_save_flags flags \n"
	" .set push \n"
	" .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC
	" mfc0 \\flags, $2, 1 \n"
#else
	" mfc0 \\flags, $12 \n"
#endif
	" .set pop \n"
	" .endm \n");
/*
 * raw_local_save_flags(x) - store the current interrupt state in x
 * (a plain read; interrupts remain in whatever state they were).
 */
#define raw_local_save_flags(x) \
__asm__ __volatile__( \
	"raw_local_save_flags %0" \
	: "=r" (x))
/*
 * raw_local_irq_save \result -- record the current interrupt state in
 * \result and disable interrupts.  Note the saved value's format differs
 * per configuration: the old IXMT bit (0x400) under SMTC, the old IE bit
 * (0x1) on MIPS R2, or the complete old Status register otherwise --
 * raw_irqs_disabled_flags() below decodes all three forms.
 */
__asm__(
	" .macro raw_local_irq_save result \n"
	" .set push \n"
	" .set reorder \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* save TCStatus, set IXMT, then reduce \result to just the old IXMT bit */
	" mfc0 \\result, $2, 1 \n"
	" ori $1, \\result, 0x400 \n"
	" .set noreorder \n"
	" mtc0 $1, $2, 1 \n"
	" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* di writes the old Status to \result; keep only the IE bit */
	" di \\result \n"
	" andi \\result, 1 \n"
#else
	/* \result keeps the full old Status; $1 gets Status with bits 0-4 cleared */
	" mfc0 \\result, $12 \n"
	" ori $1, \\result, 0x1f \n"
	" xori $1, 0x1f \n"
	" .set noreorder \n"
	" mtc0 $1, $12 \n"
#endif
	" irq_disable_hazard \n"
	" .set pop \n"
	" .endm \n");
/*
 * raw_local_irq_save(x) - save the current interrupt state into x and
 * disable interrupts; restore later with raw_local_irq_restore(x).
 */
#define raw_local_irq_save(x) \
__asm__ __volatile__( \
	"raw_local_irq_save\t%0" \
	: "=r" (x) \
	: /* no inputs */ \
	: "memory")
/*
 * raw_local_irq_restore \flags -- restore an interrupt state previously
 * captured by raw_local_irq_save.  \flags is clobbered (used as scratch
 * in several variants), which is why the C wrapper below passes it
 * through a tied dummy output operand.
 */
__asm__(
	" .macro raw_local_irq_restore flags \n"
	" .set push \n"
	" .set noreorder \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* merge the saved IXMT bit back into the current TCStatus value */
	"mfc0 $1, $2, 1 \n"
	"andi \\flags, 0x400 \n"
	"ori $1, 0x400 \n"
	"xori $1, 0x400 \n"
	"or \\flags, $1 \n"
	"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 *
	 * With noreorder in effect the di sits in the beqz delay slot, so
	 * interrupts are first disabled unconditionally; ei then re-enables
	 * them only when \flags is nonzero.
	 */
	" beqz \\flags, 1f \n"
	" di \n"
	" ei \n"
	"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 * (ins splices bit 0 of \flags straight into Status.IE.)
	 */
	" mfc0 $1, $12 \n"
	" ins $1, \\flags, 0, 1 \n"
	" mtc0 $1, $12 \n"
#else
	/* rebuild Status: IE bit from \flags, every other field from current */
	" mfc0 $1, $12 \n"
	" andi \\flags, 1 \n"
	" ori $1, 0x1f \n"
	" xori $1, 0x1f \n"
	" or \\flags, $1 \n"
	" mtc0 \\flags, $12 \n"
#endif
	" irq_disable_hazard \n"
	" .set pop \n"
	" .endm \n");
/* Replays deferred SMTC IPIs; implemented in the SMTC support code. */
extern void smtc_ipi_replay(void);

/*
 * raw_local_irq_restore() - restore the interrupt state saved by
 * raw_local_irq_save()/raw_local_save_flags().
 *
 * The assembler macro clobbers its operand, so flags is handed in via a
 * dummy output (__tmp1) tied to the input with the "0" constraint rather
 * than as a plain input operand.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore().  The 0x400 test checks the saved IXMT bit:
	 * only replay when we are about to re-enable interrupts.
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"raw_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}
  195. static inline int raw_irqs_disabled_flags(unsigned long flags)
  196. {
  197. #ifdef CONFIG_MIPS_MT_SMTC
  198. /*
  199. * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
  200. */
  201. return flags & 0x400;
  202. #else
  203. return !(flags & 1);
  204. #endif
  205. }
  206. #endif
/*
 * Do the CPU's IRQ-state tracing from assembly code.  These macros are
 * meant for use from assembly sources only; all of them collapse to
 * nothing when CONFIG_TRACE_IRQFLAGS is off.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
/* 64-bit: reload $2 and $4-$11 from the saved pt_regs frame on the stack */
# define TRACE_IRQS_RELOAD_REGS \
	LONG_L $11, PT_R11(sp); \
	LONG_L $10, PT_R10(sp); \
	LONG_L $9, PT_R9(sp); \
	LONG_L $8, PT_R8(sp); \
	LONG_L $7, PT_R7(sp); \
	LONG_L $6, PT_R6(sp); \
	LONG_L $5, PT_R5(sp); \
	LONG_L $4, PT_R4(sp); \
	LONG_L $2, PT_R2(sp)
#else
/* 32-bit: only $2 and $4-$7 need reloading */
# define TRACE_IRQS_RELOAD_REGS \
	LONG_L $7, PT_R7(sp); \
	LONG_L $6, PT_R6(sp); \
	LONG_L $5, PT_R5(sp); \
	LONG_L $4, PT_R4(sp); \
	LONG_L $2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON \
	CLI; /* make sure trace_hardirqs_on() is called in kernel level */ \
	jal trace_hardirqs_on
/* as TRACE_IRQS_ON, plus restoring the registers the call clobbered */
# define TRACE_IRQS_ON_RELOAD \
	TRACE_IRQS_ON; \
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF \
	jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
  244. #endif /* _ASM_IRQFLAGS_H */