/*
 * x86 interrupt flag (EFLAGS.IF) manipulation: native primitives plus
 * the raw_* interface, optionally routed through paravirt hooks.
 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

/* Provides the X86_EFLAGS_IF bit tested below. */
#include <asm/processor-flags.h>

/* C-level helpers; skipped when this header is included from .S files. */
#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */
/*
 * Read the current EFLAGS register (native, non-paravirt) via
 * pushf/pop.  Callers use this to sample the interrupt-enable (IF)
 * state; the "memory" clobber keeps it ordered against surrounding
 * accesses.
 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
/*
 * Write @flags back into EFLAGS (native, non-paravirt) via push/popf.
 * popf rewrites the arithmetic flags too, hence the "cc" clobber; the
 * "memory" clobber acts as a compiler barrier so memory accesses are
 * not moved across the interrupt-state change.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
/* Mask maskable interrupts on this CPU: "cli" clears EFLAGS.IF. */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
/* Unmask maskable interrupts on this CPU: "sti" sets EFLAGS.IF. */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
/*
 * Enable interrupts and halt, atomically: sti's one-instruction
 * interrupt shadow means no interrupt can be taken (and the wakeup
 * lost) between the sti and the hlt.
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
/* Halt until the next interrupt, without touching EFLAGS.IF. */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
#endif /* __ASSEMBLY__ */

/*
 * With CONFIG_PARAVIRT the raw_* interrupt-control interface is
 * supplied (and hookable) by <asm/paravirt.h>; otherwise it maps
 * straight onto the native_* primitives above.
 */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
  51. static inline unsigned long __raw_local_save_flags(void)
  52. {
  53. return native_save_fl();
  54. }
/*
 * Restore a previously saved EFLAGS value (e.g. one returned by
 * __raw_local_irq_save()).  Non-paravirt build: direct native popf.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
/* Disable interrupts on this CPU (non-paravirt: plain cli). */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
/* Enable interrupts on this CPU (non-paravirt: plain sti). */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 *
 * (i.e. interrupts are enabled and the CPU halts atomically, so a
 * pending wakeup interrupt cannot slip in between the two.)
 */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 *
 * (plain hlt -- does not change EFLAGS.IF, so with interrupts
 * disabled this stops the CPU until an NMI/reset.)
 */
static inline void halt(void)
{
	native_halt();
}
  83. /*
  84. * For spinlocks, etc:
  85. */
  86. static inline unsigned long __raw_local_irq_save(void)
  87. {
  88. unsigned long flags = __raw_local_save_flags();
  89. raw_local_irq_disable();
  90. return flags;
  91. }
#else
/*
 * Assembler-side native macros (!CONFIG_PARAVIRT).  The argument of
 * ENABLE_INTERRUPTS/DISABLE_INTERRUPTS is unused here; presumably it
 * carries clobber information for the paravirt variants -- confirm
 * against <asm/paravirt.h>.
 */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
/* Swap the kernel/user GS base on kernel entry/exit. */
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack). So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs
/* Native exception frames need no adjustment. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/* */
#define INTERRUPT_RETURN	iretq
/* Return to 64-bit user mode: restore user GS base, then sysretq. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
/* Return to 32-bit (compat) user mode via sysretl. */
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
/* Compat sysexit path: restore user GS, re-enable IRQs, sysexit. */
#define ENABLE_INTERRUPTS_SYSEXIT32		\
	swapgs;					\
	sti;					\
	sysexit
#else
/* 32-bit native equivalents. */
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
/*
 * Statement-style wrappers (shared by native and paravirt builds):
 * store the result into the caller-supplied @flags lvalue instead of
 * returning it.  do/while(0) keeps them safe as single statements.
 */
#define raw_local_save_flags(flags)				\
	do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)				\
	do { (flags) = __raw_local_irq_save(); } while (0)
  131. static inline int raw_irqs_disabled_flags(unsigned long flags)
  132. {
  133. return !(flags & X86_EFLAGS_IF);
  134. }
  135. static inline int raw_irqs_disabled(void)
  136. {
  137. unsigned long flags = __raw_local_save_flags();
  138. return raw_irqs_disabled_flags(flags);
  139. }
#else
/*
 * Assembler-side lockdep / irq-tracing hooks used by the syscall and
 * interrupt exit paths.
 */
#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
/*
 * Variant for exit paths reached with interrupts off: tell the tracer,
 * enable IRQs, and bracket the C call with SAVE_REST/RESTORE_REST
 * (presumably saving the remaining pt_regs registers -- defined
 * elsewhere), then undo everything.
 */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;
#else
/* 32-bit: preserve the caller-saved registers around the C call. */
#define ARCH_LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

/* Notify the irq-flags tracer around sti/cli; no-ops when untraced. */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

/* Lockdep syscall-exit hooks; empty unless CONFIG_DEBUG_LOCK_ALLOC. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif
#endif /* __ASSEMBLY__ */
#endif