hw_irq.h

/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
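
/*
 * Illustrative sketch only, not part of the original header: the usual
 * save/disable/restore pattern a caller of the primitives above follows.
 * The function name is hypothetical and the block is kept under #if 0 so
 * it is never compiled.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	/* Soft-disable interrupts and remember the previous soft_enabled state. */
	flags = arch_local_irq_save();

	/*
	 * Critical section: an interrupt arriving here is recorded in
	 * paca->irq_happened instead of being handled immediately.
	 */

	/* Restore the previous state and replay anything that arrived. */
	arch_local_irq_restore(flags);
}
#endif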

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()	do {			\
	u8 _was_enabled = get_paca()->soft_enabled;	\
	__hard_irq_disable();				\
	get_paca()->soft_enabled = 0;			\
	get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}
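
/*
 * Illustrative sketch only, not part of the original header, modelled on
 * what an idle-entry path might do with the helpers above: hard disable,
 * then check whether a lazily masked interrupt is already pending before
 * committing to sleep. The function name is hypothetical.
 */
#if 0
static bool example_try_enter_idle(void)
{
	hard_irq_disable();

	/* Something fired while we were soft-disabled: don't sleep. */
	if (lazy_irq_pending())
		return false;

	/* ... safe to stop the CPU here ... */
	return true;
}
#endif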

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after the source of the interrupt
 * has been cleared, even though we remain soft-disabled.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}
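
/*
 * Illustrative sketch only, not part of the original header: how an
 * asynchronous interrupt path might use may_hard_irq_enable() once its
 * source is quiesced. Both function names below are hypothetical.
 */
#if 0
static void example_async_interrupt(struct pt_regs *regs)
{
	example_clear_interrupt_source();	/* hypothetical helper */

	/*
	 * The source is cleared, so hard interrupts can come back on
	 * even though we are still soft-disabled.
	 */
	may_hard_irq_enable();

	/* ... remainder of the handler ... */
}
#endif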

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !regs->softe;
}

extern bool prep_irq_for_idle(void);

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
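
/*
 * Illustrative sketch only, not part of the original header: on 32-bit
 * the flags word is simply the MSR, so "disabled" means MSR[EE] is clear.
 * The function name is hypothetical.
 */
#if 0
static void example_check_irq_state(void)
{
	unsigned long flags = arch_local_save_flags();

	if (arch_irqs_disabled_flags(flags)) {
		/* MSR[EE] is clear: external interrupts are off here. */
	}
}
#endif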

#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */

struct irq_chip;

#endif	/* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */