/*
 * include/asm-x86_64/irqflags.h
 *
 * IRQ flags handling
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the lowlevel headers.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */
  16. static inline unsigned long __raw_local_save_flags(void)
  17. {
  18. unsigned long flags;
  19. __asm__ __volatile__(
  20. "# __raw_save_flags\n\t"
  21. "pushfq ; popq %q0"
  22. : "=g" (flags)
  23. : /* no input */
  24. : "memory"
  25. );
  26. return flags;
  27. }
  28. #define raw_local_save_flags(flags) \
  29. do { (flags) = __raw_local_save_flags(); } while (0)
  30. static inline void raw_local_irq_restore(unsigned long flags)
  31. {
  32. __asm__ __volatile__(
  33. "pushq %0 ; popfq"
  34. : /* no output */
  35. :"g" (flags)
  36. :"memory", "cc"
  37. );
  38. }
#ifdef CONFIG_X86_VSMP
/*
 * Interrupt control for the VSMP architecture
 * (tracked via both the IF and AC bits of RFLAGS):
 */
  43. static inline void raw_local_irq_disable(void)
  44. {
  45. unsigned long flags = __raw_local_save_flags();
  46. raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
  47. }
  48. static inline void raw_local_irq_enable(void)
  49. {
  50. unsigned long flags = __raw_local_save_flags();
  51. raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
  52. }
  53. static inline int raw_irqs_disabled_flags(unsigned long flags)
  54. {
  55. return !(flags & (1<<9)) || (flags & (1 << 18));
  56. }
#else /* !CONFIG_X86_VSMP */
  58. static inline void raw_local_irq_disable(void)
  59. {
  60. __asm__ __volatile__("cli" : : : "memory");
  61. }
  62. static inline void raw_local_irq_enable(void)
  63. {
  64. __asm__ __volatile__("sti" : : : "memory");
  65. }
  66. static inline int raw_irqs_disabled_flags(unsigned long flags)
  67. {
  68. return !(flags & (1 << 9));
  69. }
#endif /* CONFIG_X86_VSMP */

/*
 * For spinlocks, etc.:
 */
  74. static inline unsigned long __raw_local_irq_save(void)
  75. {
  76. unsigned long flags = __raw_local_save_flags();
  77. raw_local_irq_disable();
  78. return flags;
  79. }
  80. #define raw_local_irq_save(flags) \
  81. do { (flags) = __raw_local_irq_save(); } while (0)
  82. static inline int raw_irqs_disabled(void)
  83. {
  84. unsigned long flags = __raw_local_save_flags();
  85. return raw_irqs_disabled_flags(flags);
  86. }
/*
 * Used in the idle loop: sti keeps interrupts masked until the
 * *following* instruction completes (the one-instruction sti shadow),
 * so "sti; hlt" cannot lose a wakeup interrupt in between:
 */
  91. static inline void raw_safe_halt(void)
  92. {
  93. __asm__ __volatile__("sti; hlt" : : : "memory");
  94. }
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
  99. static inline void halt(void)
  100. {
  101. __asm__ __volatile__("hlt": : :"memory");
  102. }
  103. #else /* __ASSEMBLY__: */
  104. # ifdef CONFIG_TRACE_IRQFLAGS
  105. # define TRACE_IRQS_ON call trace_hardirqs_on_thunk
  106. # define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
  107. # else
  108. # define TRACE_IRQS_ON
  109. # define TRACE_IRQS_OFF
  110. # endif
  111. #endif
  112. #endif