irqflags.h

#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */
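/* Read the current EFLAGS value: push it onto the stack, pop it into a register. */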
static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        __asm__ __volatile__(
                "# __raw_save_flags\n\t"
                "pushf ; pop %0"
                : "=rm" (flags) /* not "=g": that would also allow an
                                   %esp/%rsp-relative memory operand,
                                   which the pushf invalidates */
                : /* no input */
                : "memory"
        );

        return flags;
}
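
/* Write flags back to EFLAGS; popf restores IF, so this may enable or disable interrupts. */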
static inline void native_restore_fl(unsigned long flags)
{
        __asm__ __volatile__(
                "push %0 ; popf"
                : /* no output */
                : "g" (flags)
                : "memory", "cc"
        );
}
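
/* cli/sti clear and set EFLAGS.IF, masking and unmasking maskable interrupts on the local CPU. */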
static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}
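
/*
 * "sti" keeps interrupts masked for one more instruction (the sti
 * shadow), so the "hlt" is reached before any pending interrupt can be
 * taken: the CPU cannot miss its wakeup interrupt between enabling
 * interrupts and halting.
 */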
static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}
#endif  /* __ASSEMBLY__ */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}
#ifdef CONFIG_X86_VSMP
/*
 * Interrupt control for the VSMP architecture:
 */
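/*
 * On vSMP, EFLAGS.AC is used together with EFLAGS.IF to express the
 * interrupt-disabled state: interrupts count as disabled when IF is
 * clear or AC is set (see raw_irqs_disabled_flags() below).
 */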
static inline void raw_local_irq_disable(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}

static inline void raw_local_irq_enable(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
#else
static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}
#endif
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
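/*
 * The old flags are read before the disable, so the caller can later
 * put things back exactly as they were with raw_local_irq_restore().
 */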
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();
        return flags;
}
#else
#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
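/*
 * Native 64-bit syscall return: restore the user stack pointer that was
 * stashed in the PDA at syscall entry, switch back to the user GS base
 * with swapgs, then return with sysretq.
 */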
#define INTERRUPT_RETURN        iretq
#define ENABLE_INTERRUPTS_SYSCALL_RET                   \
                        movq    %gs:pda_oldrsp, %rsp;   \
                        swapgs;                         \
                        sysretq;
#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSCALL_RET   sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags)                             \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)                               \
        do { (flags) = __raw_local_irq_save(); } while (0)
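
/*
 * These are macros rather than functions so that "flags" can be
 * assigned directly in the caller's scope.
 */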
#ifdef CONFIG_X86_VSMP
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
}
#else
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}
#endif

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}
/*
 * Makes the traced hardirq state match the machine state.
 *
 * Should be a rarely used function, only in places where it's
 * otherwise impossible to know the irq state, like in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
        if (raw_irqs_disabled_flags(flags))
                trace_hardirqs_off();
        else
                trace_hardirqs_on();
}

static inline void trace_hardirqs_fixup(void)
{
        unsigned long flags = __raw_local_save_flags();

        trace_hardirqs_fixup_flags(flags);
}
#else
#ifdef CONFIG_X86_64
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack). So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs
#define ARCH_TRACE_IRQS_ON      call trace_hardirqs_on_thunk
#define ARCH_TRACE_IRQS_OFF     call trace_hardirqs_off_thunk
#define ARCH_LOCKDEP_SYS_EXIT   call lockdep_sys_exit_thunk
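
/*
 * The _IRQ variant runs on paths where interrupts are disabled: they
 * are traced on and briefly re-enabled around the lockdep call, then
 * disabled (and traced off) again before returning to the caller.
 */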
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;
#else
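/*
 * On 32-bit, %eax, %ecx and %edx are caller-clobbered by the C calling
 * convention, so they are saved and restored around each call into C.
 */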
#define ARCH_TRACE_IRQS_ON                      \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_on;                 \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_TRACE_IRQS_OFF                     \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_off;                \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON          ARCH_TRACE_IRQS_ON
# define TRACE_IRQS_OFF         ARCH_TRACE_IRQS_OFF
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT       ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ   ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */