irqflags.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>

__asm__(
	"	.macro	raw_local_irq_enable			\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400				\n"
	"	xori	$1, 0x400				\n"
	"	mtc0	$1, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei						\n"
#else
	"	mfc0	$1,$12					\n"
	"	ori	$1,0x1f					\n"
	"	xori	$1,0x1e					\n"
	"	mtc0	$1,$12					\n"
#endif
	"	irq_enable_hazard				\n"
	"	.set	pop					\n"
	"	.endm");

extern void smtc_ipi_replay(void);

static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of call overhead on each local_irq_enable()
	 */
	smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"raw_local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, operating on only the IE bit is not enough.
 *
 * If an mfc0 $12 follows a store and the mfc0 is the last instruction of
 * a page, and fetching the next instruction causes a TLB miss, the result
 * of the mfc0 might wrongly contain the EXL bit.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result or place a nop before the
 * mfc0 (both sketched below).
 */
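
/*
 * Illustrative sketch only of the two workarounds above, assuming the
 * usual CP0 Status layout where EXL is bit 1 (mask 0x2); not generated
 * by this header.
 *
 *	# Option 1: clear a spuriously-set EXL bit in the result
 *	mfc0	$1, $12
 *	ori	$1, 0x2
 *	xori	$1, 0x2
 *
 *	# Option 2: separate the preceding store from the mfc0
 *	nop
 *	mfc0	$1, $12
 */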

__asm__(
	"	.macro	raw_local_irq_disable			\n"
	"	.set	push					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1				\n"
	"	ori	$1, 0x400				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di						\n"
#else
	"	mfc0	$1,$12					\n"
	"	ori	$1,0x1f					\n"
	"	xori	$1,0x1f					\n"
	"	.set	noreorder				\n"
	"	mtc0	$1,$12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

static inline void raw_local_irq_disable(void)
{
	__asm__ __volatile__(
		"raw_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}
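
/*
 * Example (illustrative only): a caller that knows interrupts are
 * currently enabled can bracket a short critical section with the
 * pair above.
 *
 *	raw_local_irq_disable();
 *	... code that must not be interrupted on this CPU ...
 *	raw_local_irq_enable();
 *
 * Callers that may already be running with interrupts disabled should
 * use raw_local_irq_save()/raw_local_irq_restore() below instead.
 */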

__asm__(
	"	.macro	raw_local_save_flags flags		\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\flags, $2, 1				\n"
#else
	"	mfc0	\\flags, $12				\n"
#endif
	"	.set	pop					\n"
	"	.endm						\n");

#define raw_local_save_flags(x)						\
__asm__ __volatile__(							\
	"raw_local_save_flags %0"					\
	: "=r" (x))
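
/*
 * Example (illustrative only): snapshot the current IRQ state (CP0
 * Status, or TCStatus on SMTC) without changing it; the value can
 * later be tested with raw_irqs_disabled_flags(), defined below.
 *
 *	unsigned long flags;
 *
 *	raw_local_save_flags(flags);
 */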

__asm__(
	"	.macro	raw_local_irq_save result		\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\result, $2, 1				\n"
	"	ori	$1, \\result, 0x400			\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $2, 1				\n"
	"	andi	\\result, \\result, 0x400		\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di	\\result				\n"
	"	andi	\\result, 1				\n"
#else
	"	mfc0	\\result, $12				\n"
	"	ori	$1, \\result, 0x1f			\n"
	"	xori	$1, 0x1f				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

#define raw_local_irq_save(x)						\
__asm__ __volatile__(							\
	"raw_local_irq_save\t%0"					\
	: "=r" (x)							\
	: /* no inputs */						\
	: "memory")

__asm__(
	"	.macro	raw_local_irq_restore flags		\n"
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1				\n"
	"	andi	\\flags, 0x400				\n"
	"	ori	$1, 0x400				\n"
	"	xori	$1, 0x400				\n"
	"	or	\\flags, $1				\n"
	"	mtc0	\\flags, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	\\flags, 1f				\n"
	"	di						\n"
	"	ei						\n"
	"1:							\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12					\n"
	"	ins	$1, \\flags, 0, 1			\n"
	"	mtc0	$1, $12					\n"
#else
	"	mfc0	$1, $12					\n"
	"	andi	\\flags, 1				\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1f				\n"
	"	or	\\flags, $1				\n"
	"	mtc0	\\flags, $12				\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

static inline void raw_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore()
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"raw_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}
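
/*
 * Same as raw_local_irq_restore(), but without the SMTC software IPI
 * replay -- presumably for callers that must not recurse into
 * smtc_ipi_replay(), such as the replay path itself.
 */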
static inline void __raw_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
		"raw_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}
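
/*
 * Example (illustrative only): the canonical save/restore pattern,
 * which is safe whether or not interrupts were already disabled on
 * entry.
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);
 *	... critical section ...
 *	raw_local_irq_restore(flags);
 */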

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}
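
/*
 * Example (illustrative only): the kind of check the generic
 * irqs_disabled() helper performs, built from the two primitives
 * above.
 *
 *	unsigned long flags;
 *
 *	raw_local_save_flags(flags);
 *	return raw_irqs_disabled_flags(flags);
 */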

#endif /* !__ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$11, PT_R11(sp);				\
	LONG_L	$10, PT_R10(sp);				\
	LONG_L	$9, PT_R9(sp);					\
	LONG_L	$8, PT_R8(sp);					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called at kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD					\
	TRACE_IRQS_ON;						\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF					\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
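
/*
 * Example (illustrative only, assembler): how exception entry code
 * typically brackets an interrupt-state change with these macros when
 * CONFIG_TRACE_IRQFLAGS is enabled.  The surrounding instructions are
 * assumptions, not taken from this file.
 *
 *	TRACE_IRQS_OFF			# record that IRQs are now off
 *	...	handle the exception/interrupt ...
 *	TRACE_IRQS_ON_RELOAD		# record that IRQs come back on,
 *					# then reload clobbered arg regs
 *	eret
 */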

#endif /* _ASM_IRQFLAGS_H */