/* arch/powerpc/include/asm/hw_irq.h */
  1. /*
  2. * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
  3. */
  4. #ifndef _ASM_POWERPC_HW_IRQ_H
  5. #define _ASM_POWERPC_HW_IRQ_H
  6. #ifdef __KERNEL__
  7. #include <linux/errno.h>
  8. #include <linux/compiler.h>
  9. #include <asm/ptrace.h>
  10. #include <asm/processor.h>
  11. extern void timer_interrupt(struct pt_regs *);
  12. #ifdef CONFIG_PPC64
  13. #include <asm/paca.h>
  14. static inline unsigned long local_get_flags(void)
  15. {
  16. unsigned long flags;
  17. __asm__ __volatile__("lbz %0,%1(13)"
  18. : "=r" (flags)
  19. : "i" (offsetof(struct paca_struct, soft_enabled)));
  20. return flags;
  21. }
  22. static inline unsigned long raw_local_irq_disable(void)
  23. {
  24. unsigned long flags, zero;
  25. __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
  26. : "=r" (flags), "=&r" (zero)
  27. : "i" (offsetof(struct paca_struct, soft_enabled))
  28. : "memory");
  29. return flags;
  30. }
  31. extern void raw_local_irq_restore(unsigned long);
  32. extern void iseries_handle_interrupts(void);
  33. #define raw_local_irq_enable() raw_local_irq_restore(1)
  34. #define raw_local_save_flags(flags) ((flags) = local_get_flags())
  35. #define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable())
  36. #define raw_irqs_disabled() (local_get_flags() == 0)
  37. #define raw_irqs_disabled_flags(flags) ((flags) == 0)
  38. #ifdef CONFIG_PPC_BOOK3E
  39. #define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory");
  40. #define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory");
  41. #else
  42. #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
  43. #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
  44. #endif
  45. #define hard_irq_disable() \
  46. do { \
  47. __hard_irq_disable(); \
  48. get_paca()->soft_enabled = 0; \
  49. get_paca()->hard_enabled = 0; \
  50. } while(0)
  51. static inline int irqs_disabled_flags(unsigned long flags)
  52. {
  53. return flags == 0;
  54. }
  55. #else
  56. #if defined(CONFIG_BOOKE)
  57. #define SET_MSR_EE(x) mtmsr(x)
  58. #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
  59. #else
  60. #define SET_MSR_EE(x) mtmsr(x)
  61. #define raw_local_irq_restore(flags) mtmsr(flags)
  62. #endif
  63. static inline void raw_local_irq_disable(void)
  64. {
  65. #ifdef CONFIG_BOOKE
  66. __asm__ __volatile__("wrteei 0": : :"memory");
  67. #else
  68. unsigned long msr;
  69. msr = mfmsr();
  70. SET_MSR_EE(msr & ~MSR_EE);
  71. #endif
  72. }
  73. static inline void raw_local_irq_enable(void)
  74. {
  75. #ifdef CONFIG_BOOKE
  76. __asm__ __volatile__("wrteei 1": : :"memory");
  77. #else
  78. unsigned long msr;
  79. msr = mfmsr();
  80. SET_MSR_EE(msr | MSR_EE);
  81. #endif
  82. }
  83. static inline void raw_local_irq_save_ptr(unsigned long *flags)
  84. {
  85. unsigned long msr;
  86. msr = mfmsr();
  87. *flags = msr;
  88. #ifdef CONFIG_BOOKE
  89. __asm__ __volatile__("wrteei 0": : :"memory");
  90. #else
  91. SET_MSR_EE(msr & ~MSR_EE);
  92. #endif
  93. }
  94. #define raw_local_save_flags(flags) ((flags) = mfmsr())
  95. #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
  96. #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
  97. #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
  98. #define hard_irq_disable() raw_local_irq_disable()
  99. static inline int irqs_disabled_flags(unsigned long flags)
  100. {
  101. return (flags & MSR_EE) == 0;
  102. }
  103. #endif /* CONFIG_PPC64 */
  104. /*
  105. * interrupt-retrigger: should we handle this via lost interrupts and IPIs
  106. * or should we not care like we do now ? --BenH.
  107. */
  108. struct irq_chip;
  109. #ifdef CONFIG_PERF_EVENTS
  110. #ifdef CONFIG_PPC64
  111. static inline unsigned long test_perf_event_pending(void)
  112. {
  113. unsigned long x;
  114. asm volatile("lbz %0,%1(13)"
  115. : "=r" (x)
  116. : "i" (offsetof(struct paca_struct, perf_event_pending)));
  117. return x;
  118. }
  119. static inline void set_perf_event_pending(void)
  120. {
  121. asm volatile("stb %0,%1(13)" : :
  122. "r" (1),
  123. "i" (offsetof(struct paca_struct, perf_event_pending)));
  124. }
  125. static inline void clear_perf_event_pending(void)
  126. {
  127. asm volatile("stb %0,%1(13)" : :
  128. "r" (0),
  129. "i" (offsetof(struct paca_struct, perf_event_pending)));
  130. }
  131. #endif /* CONFIG_PPC64 */
  132. #else /* CONFIG_PERF_EVENTS */
  133. static inline unsigned long test_perf_event_pending(void)
  134. {
  135. return 0;
  136. }
  137. static inline void clear_perf_event_pending(void) {}
  138. #endif /* CONFIG_PERF_EVENTS */
  139. #endif /* __KERNEL__ */
  140. #endif /* _ASM_POWERPC_HW_IRQ_H */