irqflags.h

#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */
static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * Note: this needs to be "=r" not "=rm", because we have the
         * stack offset from what gcc expects at the time the "pop" is
         * executed, and so a memory reference with respect to the stack
         * would end up using the wrong address.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=r" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}

/*
 * "push %0 ; popf" rewrites the whole EFLAGS register, which is why
 * the "cc" clobber is needed in addition to "memory":
 */
static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     : "g" (flags)
                     : "memory", "cc");
}

static inline void native_irq_disable(void)
{
        asm volatile("cli" : : : "memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti" : : : "memory");
}

static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt" : : : "memory");
}
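
/*
 * Note on native_safe_halt(): "sti; hlt" relies on the one-instruction
 * STI interrupt shadow -- interrupts become deliverable only after the
 * instruction that follows sti, so no interrupt can slip in between
 * enabling IRQs and halting.  A pending wakeup interrupt is then taken
 * from the halt itself, and execution resumes past the hlt.
 */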

static inline void native_halt(void)
{
        asm volatile("hlt" : : : "memory");
}
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}
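
/*
 * Illustrative sketch (not from the original header): an idle loop
 * wants "enable interrupts, then halt" to be atomic so that a wakeup
 * cannot be lost in between, which is exactly what raw_safe_halt()
 * provides.  need_resched() stands in for whatever wakeup condition
 * the loop polls:
 *
 *      while (!need_resched())
 *              raw_safe_halt();
 */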

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();
        return flags;
}
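
/*
 * Illustrative sketch (not from the original header): a typical short
 * critical section pairs the save with a restore, so the caller's IF
 * state is preserved whether interrupts were on or off on entry:
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ... code that must not race with interrupts on this CPU ...
 *      raw_local_irq_restore(flags);
 */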

#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack.  x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
#define INTERRUPT_RETURN        iretq
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit
#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags)                             \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)                               \
        do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}
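
/*
 * Illustrative sketch (not from the original header):
 * raw_irqs_disabled_flags() tests the IF bit of a previously saved
 * EFLAGS image, so a caller can check the state that was captured
 * earlier rather than the current one:
 *
 *      unsigned long flags;
 *
 *      raw_local_save_flags(flags);
 *      if (raw_irqs_disabled_flags(flags))
 *              ... interrupts were off when flags was sampled ...
 */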

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON;                  \
        sti;                            \
        SAVE_REST;                      \
        LOCKDEP_SYS_EXIT;               \
        RESTORE_REST;                   \
        cli;                            \
        TRACE_IRQS_OFF;
#else
/*
 * lockdep_sys_exit is a C function, so the caller-clobbered
 * registers have to be saved and restored around the call:
 */
#define ARCH_LOCKDEP_SYS_EXIT           \
        pushl %eax;                     \
        pushl %ecx;                     \
        pushl %edx;                     \
        call lockdep_sys_exit;          \
        popl %edx;                      \
        popl %ecx;                      \
        popl %eax;
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON          call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF         call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT       ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ   ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */