/*
 * irqflags.h — x86 interrupt-flag (EFLAGS.IF) manipulation primitives:
 * native inline-asm implementations, the non-paravirt raw_* wrappers,
 * and the assembly-side helper macros.
 */
  1. #ifndef _X86_IRQFLAGS_H_
  2. #define _X86_IRQFLAGS_H_
  3. #include <asm/processor-flags.h>
  4. #ifndef __ASSEMBLY__
  5. /*
  6. * Interrupt control:
  7. */
  8. static inline unsigned long native_save_fl(void)
  9. {
  10. unsigned long flags;
  11. asm volatile("# __raw_save_flags\n\t"
  12. "pushf ; pop %0"
  13. : "=g" (flags)
  14. : /* no input */
  15. : "memory");
  16. return flags;
  17. }
/*
 * Write a saved flags image (as returned by native_save_fl()) back into
 * the flags register via push/popf.  Depending on the IF bit in @flags
 * this may enable or disable interrupts, hence the "memory" barrier;
 * popf rewrites the arithmetic flags too, hence the "cc" clobber.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
/*
 * Disable maskable interrupts on this CPU ("cli").  The "memory"
 * clobber keeps the compiler from moving memory accesses across the
 * boundary of the critical section.
 */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
/*
 * Enable maskable interrupts on this CPU ("sti").  "memory" clobber
 * for the same compiler-barrier reason as native_irq_disable().
 */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
/*
 * Atomically enable interrupts and halt.  "sti" only takes effect after
 * the following instruction completes (cf. the raw_safe_halt() comment
 * in this file about sti needing one instruction cycle), so no
 * interrupt can sneak in between the sti and the hlt: the CPU halts
 * with interrupts enabled and wakes on the next interrupt.
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
/*
 * Halt without touching the interrupt flag.  If interrupts are
 * disabled when this is called, only an NMI/SMI/reset will wake the
 * CPU — used deliberately for shutdown-style paths (see halt()).
 */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
  41. #endif
  42. #ifdef CONFIG_PARAVIRT
  43. #include <asm/paravirt.h>
  44. #else
  45. #ifndef __ASSEMBLY__
/* Non-paravirt build: reading the flags is just the native pushf/pop. */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
/* Restore a flags word previously saved by __raw_local_irq_save(). */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
/* Non-paravirt build: plain cli. */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
/* Non-paravirt build: plain sti. */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
  62. /*
  63. * Used in the idle loop; sti takes one instruction cycle
  64. * to complete:
  65. */
/* Enable interrupts and halt in one race-free step (sti; hlt). */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}
  70. /*
  71. * Used when interrupts are already enabled or to
  72. * shutdown the processor:
  73. */
/* Halt without changing the interrupt flag (see native_halt()). */
static inline void halt(void)
{
	native_halt();
}
  78. /*
  79. * For spinlocks, etc:
  80. */
  81. static inline unsigned long __raw_local_irq_save(void)
  82. {
  83. unsigned long flags = __raw_local_save_flags();
  84. raw_local_irq_disable();
  85. return flags;
  86. }
  87. #else
/*
 * Assembly-side interrupt macros, non-paravirt flavour: bare
 * instructions.  (When CONFIG_PARAVIRT is set these come from
 * asm/paravirt.h instead.)  The (x) argument is unused here.
 */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define INTERRUPT_RETURN	iretq
/*
 * Syscall return path: reload %rsp from pda_oldrsp (presumably the
 * user stack pointer saved by the syscall entry code — confirm against
 * entry_64), switch %gs back with swapgs, then sysretq.
 */
#define ENABLE_INTERRUPTS_SYSCALL_RET \
	movq %gs:pda_oldrsp, %rsp; \
	swapgs; \
	sysretq;
#else
#define INTERRUPT_RETURN	iret
/* 32-bit fast syscall return: re-enable interrupts, then sysexit. */
#define ENABLE_INTERRUPTS_SYSCALL_RET	sti; sysexit
#define GET_CR0_INTO_EAX	movl %cr0, %eax
#endif
  101. #endif /* __ASSEMBLY__ */
  102. #endif /* CONFIG_PARAVIRT */
  103. #ifndef __ASSEMBLY__
/*
 * Statement-style wrappers that store the sampled flags into the
 * caller's lvalue.  do { } while (0) makes each expand to a single
 * statement, safe inside an unbraced if/else.
 */
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)
#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)
  108. static inline int raw_irqs_disabled_flags(unsigned long flags)
  109. {
  110. return !(flags & X86_EFLAGS_IF);
  111. }
  112. static inline int raw_irqs_disabled(void)
  113. {
  114. unsigned long flags = __raw_local_save_flags();
  115. return raw_irqs_disabled_flags(flags);
  116. }
  117. /*
  118. * makes the traced hardirq state match with the machine state
  119. *
  120. * should be a rarely used function, only in places where its
  121. * otherwise impossible to know the irq state, like in traps.
  122. */
  123. static inline void trace_hardirqs_fixup_flags(unsigned long flags)
  124. {
  125. if (raw_irqs_disabled_flags(flags))
  126. trace_hardirqs_off();
  127. else
  128. trace_hardirqs_on();
  129. }
  130. static inline void trace_hardirqs_fixup(void)
  131. {
  132. unsigned long flags = __raw_local_save_flags();
  133. trace_hardirqs_fixup_flags(flags);
  134. }
  135. #else
#ifdef CONFIG_X86_64
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack.  x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

/* 64-bit: call the lockdep exit hook through a thunk. */
#define ARCH_LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk

/*
 * Lockdep exit hook for a path entered with interrupts off: the
 * tracer is told interrupts go on, they are enabled around the
 * SAVE_REST/LOCKDEP_SYS_EXIT/RESTORE_REST sequence, then disabled
 * again and the tracer is told so.
 */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;

#else
/*
 * 32-bit: call lockdep_sys_exit directly, preserving the
 * caller-saved registers by hand around the call.
 */
#define ARCH_LOCKDEP_SYS_EXIT \
	pushl %eax; \
	pushl %ecx; \
	pushl %edx; \
	call lockdep_sys_exit; \
	popl %edx; \
	popl %ecx; \
	popl %eax;

/* The irq-enabled variant is empty on 32-bit. */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

/* Tracepoints for hardirq on/off transitions (thunks preserve regs). */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON	call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

/* Lockdep exit hooks compile away unless CONFIG_DEBUG_LOCK_ALLOC. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif
  181. #endif /* __ASSEMBLY__ */
  182. #endif