/*
 * include/asm-x86_64/irqflags.h
 *
 * IRQ flags handling
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the lowlevel headers.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */
  17. static inline unsigned long __raw_local_save_flags(void)
  18. {
  19. unsigned long flags;
  20. __asm__ __volatile__(
  21. "# __raw_save_flags\n\t"
  22. "pushfq ; popq %q0"
  23. : "=g" (flags)
  24. : /* no input */
  25. : "memory"
  26. );
  27. return flags;
  28. }
/* Lvalue-style wrapper: store the current EFLAGS into 'flags'. */
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)
/*
 * Write a previously saved EFLAGS value back via pushq/popfq.
 *
 * Clobbers both "memory" and "cc": popfq rewrites the whole flags
 * register (it may re-enable interrupts), so the compiler must not
 * cache memory contents or condition codes across this point.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
		"pushq %0 ; popfq"
		: /* no output */
		:"g" (flags)
		:"memory", "cc"
	);
}
#ifdef CONFIG_X86_VSMP
/*
 * Interrupt control for the VSMP architecture:
 */
  44. static inline void raw_local_irq_disable(void)
  45. {
  46. unsigned long flags = __raw_local_save_flags();
  47. raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
  48. }
  49. static inline void raw_local_irq_enable(void)
  50. {
  51. unsigned long flags = __raw_local_save_flags();
  52. raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
  53. }
  54. static inline int raw_irqs_disabled_flags(unsigned long flags)
  55. {
  56. return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
  57. }
#else /* CONFIG_X86_VSMP */
/*
 * Hardware-disable interrupts on this CPU (cli clears EFLAGS.IF).
 * The "memory" clobber makes this a compiler barrier as well.
 */
static inline void raw_local_irq_disable(void)
{
	__asm__ __volatile__("cli" : : : "memory");
}
/*
 * Hardware-enable interrupts on this CPU (sti sets EFLAGS.IF).
 * The "memory" clobber makes this a compiler barrier as well.
 */
static inline void raw_local_irq_enable(void)
{
	__asm__ __volatile__("sti" : : : "memory");
}
  67. static inline int raw_irqs_disabled_flags(unsigned long flags)
  68. {
  69. return !(flags & X86_EFLAGS_IF);
  70. }
#endif /* CONFIG_X86_VSMP */

/*
 * For spinlocks, etc.:
 */
  75. static inline unsigned long __raw_local_irq_save(void)
  76. {
  77. unsigned long flags = __raw_local_save_flags();
  78. raw_local_irq_disable();
  79. return flags;
  80. }
/* Lvalue-style wrapper: save EFLAGS into 'flags' and disable irqs. */
#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)
  83. static inline int raw_irqs_disabled(void)
  84. {
  85. unsigned long flags = __raw_local_save_flags();
  86. return raw_irqs_disabled_flags(flags);
  87. }
/*
 * Used in the idle loop; "sti" only takes effect after the *next*
 * instruction (the one-instruction interrupt shadow), so the
 * sti/hlt pair below cannot be split by an interrupt:
 */
/*
 * Enable interrupts and halt atomically: because sti's effect is
 * delayed until after the following instruction, no interrupt can
 * arrive between the sti and the hlt (which would otherwise leave
 * hlt sleeping with nothing pending to wake it).
 */
static inline void raw_safe_halt(void)
{
	__asm__ __volatile__("sti; hlt" : : : "memory");
}
/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
/*
 * Halt without touching the interrupt flag: sleeps until the next
 * interrupt if they are enabled, or stops the CPU for good if they
 * are disabled.
 */
static inline void halt(void)
{
	__asm__ __volatile__("hlt": : :"memory");
}
#else /* __ASSEMBLY__: */
/*
 * Assembly-side hooks: emit calls into the irq-flags tracer only
 * when CONFIG_TRACE_IRQFLAGS is configured in; otherwise the
 * macros expand to nothing.
 */
# ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
# else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
# endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQFLAGS_H */