irqflags.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/hazards.h>

#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
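/*
 * On MIPS32/64 R2 (and without the SMTC thread model) the di/ei
 * instructions update Status.IE atomically, so the IRQ state helpers
 * can be implemented inline; older ISAs use the out-of-line versions
 * from mips-atomic.c instead (see the #else branch below).
 */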
static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noat					\n"
	"	di						\n"
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}
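/*
 * "di %[flags]" copies the previous value of CP0 Status into %[flags]
 * before clearing Status.IE; the following andi keeps only bit 0 (IE),
 * which is all arch_local_irq_restore() needs to put things back.
 */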
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
	"	di	%[flags]				\n"
	"	andi	%[flags], 1				\n"
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	return flags;
}
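/*
 * arch_local_irq_restore() below has two strategies.  The beqz/di/ei
 * sequence only ever touches Status.IE, so an interrupt handler that
 * rewrites other Status bits (the CPU interrupt controller's IM mask
 * bits, with CONFIG_IRQ_CPU) cannot have its update lost - presumably
 * the long-standing race the comment alludes to.  The mfc0/ins/mtc0
 * path is a plain read-modify-write of the whole Status word: faster,
 * but open to exactly that kind of lost update.
 */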
static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#if defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f				\n"
	"	di						\n"
	"	ei						\n"
	"1:							\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12					\n"
	"	ins	$1, %[flags], 0, 1			\n"
	"	mtc0	$1, $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");
}
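/*
 * Identical to arch_local_irq_restore() except that the asm clobbers
 * the local flags copy directly instead of a separate temporary.
 */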
static inline void __arch_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#if defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f				\n"
	"	di						\n"
	"	ei						\n"
	"1:							\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12					\n"
	"	ins	$1, %[flags], 0, 1			\n"
	"	mtc0	$1, $12					\n"
#endif
	"	" __stringify(__irq_disable_hazard) "		\n"
	"	.set	pop					\n"
	: [flags] "=r" (flags)
	: "0" (flags)
	: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */

extern void smtc_ipi_replay(void);
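/*
 * Three ways to enable interrupts, selected at build time: SMTC clears
 * TCStatus.IXMT (bit 0x400) for the current thread context, MIPS R2
 * uses the ei instruction, and everything else falls back to the
 * classic ori 0x1f / xori 0x1e trick, which sets Status.IE while
 * clearing EXL, ERL and the KSU field.
 */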
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * The SMTC kernel needs to do a software replay of queued IPIs,
	 * at the cost of call overhead on each local_irq_enable().
	 */
	smtc_ipi_replay();
#endif
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400				\n"
	"	xori	$1, 0x400				\n"
	"	mtc0	$1, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei						\n"
#else
	"	mfc0	$1, $12					\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1e				\n"
	"	mtc0	$1, $12					\n"
#endif
	"	" __stringify(__irq_enable_hazard) "		\n"
	"	.set	pop					\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}
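/*
 * arch_local_save_flags() returns the raw CP0 Status word (or TCStatus
 * under SMTC); arch_irqs_disabled_flags() below knows which bit of that
 * word actually encodes "interrupts disabled".
 */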
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	%[flags], $2, 1				\n"
#else
	"	mfc0	%[flags], $12				\n"
#endif
	"	.set	pop					\n"
	: [flags] "=r" (flags));

	return flags;
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * The SMTC model uses TCStatus.IXMT to disable interrupts for a
	 * thread/CPU.
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}
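/*
 * Illustrative use, via the generic wrappers in <linux/irqflags.h>
 * that are built on the arch_* helpers above:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		/- arch_local_irq_save() -/
 *	... code that must not be interrupted on this CPU ...
 *	local_irq_restore(flags);	/- arch_local_irq_restore() -/
 */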
#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$11, PT_R11(sp);				\
	LONG_L	$10, PT_R10(sp);				\
	LONG_L	$9, PT_R9(sp);					\
	LONG_L	$8, PT_R8(sp);					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#endif
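/*
 * trace_hardirqs_on() is a C function, so it may clobber the argument
 * registers ($4-$7, plus $8-$11 on 64-bit ABIs) and the return value
 * register $2; assembly callers that still need those values reload
 * them from the saved pt_regs frame on the kernel stack.
 */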
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called at kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD					\
	TRACE_IRQS_ON;						\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF						\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif

#endif /* _ASM_IRQFLAGS_H */