/*
 * include/asm-i386/irqflags.h
 *
 * IRQ flags handling
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the lowlevel headers.
 */
  10. #ifndef _ASM_IRQFLAGS_H
  11. #define _ASM_IRQFLAGS_H
  12. #ifdef CONFIG_PARAVIRT
  13. #include <asm/paravirt.h>
  14. #else
  15. #ifndef __ASSEMBLY__
  16. static inline unsigned long __raw_local_save_flags(void)
  17. {
  18. unsigned long flags;
  19. __asm__ __volatile__(
  20. "pushfl ; popl %0"
  21. : "=g" (flags)
  22. : /* no input */
  23. );
  24. return flags;
  25. }
  26. static inline void raw_local_irq_restore(unsigned long flags)
  27. {
  28. __asm__ __volatile__(
  29. "pushl %0 ; popfl"
  30. : /* no output */
  31. :"g" (flags)
  32. :"memory", "cc"
  33. );
  34. }
  35. static inline void raw_local_irq_disable(void)
  36. {
  37. __asm__ __volatile__("cli" : : : "memory");
  38. }
  39. static inline void raw_local_irq_enable(void)
  40. {
  41. __asm__ __volatile__("sti" : : : "memory");
  42. }
  43. /*
  44. * Used in the idle loop; sti takes one instruction cycle
  45. * to complete:
  46. */
  47. static inline void raw_safe_halt(void)
  48. {
  49. __asm__ __volatile__("sti; hlt" : : : "memory");
  50. }
  51. /*
  52. * Used when interrupts are already enabled or to
  53. * shutdown the processor:
  54. */
  55. static inline void halt(void)
  56. {
  57. __asm__ __volatile__("hlt": : :"memory");
  58. }
  59. /*
  60. * For spinlocks, etc:
  61. */
  62. static inline unsigned long __raw_local_irq_save(void)
  63. {
  64. unsigned long flags = __raw_local_save_flags();
  65. raw_local_irq_disable();
  66. return flags;
  67. }
  68. #else
  69. #define DISABLE_INTERRUPTS(clobbers) cli
  70. #define ENABLE_INTERRUPTS(clobbers) sti
  71. #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
  72. #define INTERRUPT_RETURN iret
  73. #define GET_CR0_INTO_EAX movl %cr0, %eax
  74. #endif /* __ASSEMBLY__ */
  75. #endif /* CONFIG_PARAVIRT */
  76. #ifndef __ASSEMBLY__
  77. #define raw_local_save_flags(flags) \
  78. do { (flags) = __raw_local_save_flags(); } while (0)
  79. #define raw_local_irq_save(flags) \
  80. do { (flags) = __raw_local_irq_save(); } while (0)
  81. static inline int raw_irqs_disabled_flags(unsigned long flags)
  82. {
  83. return !(flags & (1 << 9));
  84. }
  85. static inline int raw_irqs_disabled(void)
  86. {
  87. unsigned long flags = __raw_local_save_flags();
  88. return raw_irqs_disabled_flags(flags);
  89. }
  90. #endif /* __ASSEMBLY__ */
  91. /*
  92. * Do the CPU's IRQ-state tracing from assembly code. We call a
  93. * C function, so save all the C-clobbered registers:
  94. */
  95. #ifdef CONFIG_TRACE_IRQFLAGS
  96. # define TRACE_IRQS_ON \
  97. pushl %eax; \
  98. pushl %ecx; \
  99. pushl %edx; \
  100. call trace_hardirqs_on; \
  101. popl %edx; \
  102. popl %ecx; \
  103. popl %eax;
  104. # define TRACE_IRQS_OFF \
  105. pushl %eax; \
  106. pushl %ecx; \
  107. pushl %edx; \
  108. call trace_hardirqs_off; \
  109. popl %edx; \
  110. popl %ecx; \
  111. popl %eax;
  112. #else
  113. # define TRACE_IRQS_ON
  114. # define TRACE_IRQS_OFF
  115. #endif
  116. #endif