hw_irq.h

/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

extern void timer_interrupt(struct pt_regs *);

#ifdef CONFIG_PPC64
#include <asm/paca.h>
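
/*
 * 64-bit uses "lazy" (soft) interrupt disabling.  The state lives in the
 * per-CPU paca, which the kernel always addresses through r13, so it can
 * be read or written with a single byte load/store.  Disabling interrupts
 * only clears paca->soft_enabled; MSR:EE is normally left on.  If an
 * interrupt does arrive while soft-disabled, the exception entry code
 * hard-disables instead (recording that in paca->hard_enabled), and
 * raw_local_irq_restore() notices this on soft-enable and re-enables
 * MSR:EE so the interrupt can be taken then.
 */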

static inline unsigned long local_get_flags(void)
{
        unsigned long flags;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (flags)
        : "i" (offsetof(struct paca_struct, soft_enabled)));

        return flags;
}

static inline unsigned long raw_local_irq_disable(void)
{
        unsigned long flags, zero;

        __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
        : "=r" (flags), "=&r" (zero)
        : "i" (offsetof(struct paca_struct, soft_enabled))
        : "memory");

        return flags;
}

extern void raw_local_irq_restore(unsigned long);
extern void iseries_handle_interrupts(void);

#define raw_local_irq_enable()          raw_local_irq_restore(1)
#define raw_local_save_flags(flags)     ((flags) = local_get_flags())
#define raw_local_irq_save(flags)       ((flags) = raw_local_irq_disable())

#define raw_irqs_disabled()             (local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags)  ((flags) == 0)
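
/*
 * The "hard" variants below really toggle MSR:EE.  mtmsrd with L=1 is
 * architected to alter only the EE and RI bits, which makes it cheaper
 * than a full MSR update.
 */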
#define __hard_irq_enable()     __mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable()    __mtmsrd(mfmsr() & ~MSR_EE, 1)

#define hard_irq_disable()                      \
        do {                                    \
                __hard_irq_disable();           \
                get_paca()->soft_enabled = 0;   \
                get_paca()->hard_enabled = 0;   \
        } while(0)
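
/*
 * Typical use, via the generic wrappers in <linux/irqflags.h> (a sketch,
 * assuming those wrappers map local_irq_save()/local_irq_restore() onto
 * the raw_* operations above):
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          // clears paca->soft_enabled only
 *      ...critical section...          // MSR:EE may still be on here
 *      local_irq_restore(flags);       // soft-enables, and lets any
 *                                      // interrupt that arrived meanwhile
 *                                      // be taken
 */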

static inline int irqs_disabled_flags(unsigned long flags)
{
        return flags == 0;
}

#else /* CONFIG_PPC64 */
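
/*
 * 32-bit has no soft-disable state: these operations toggle MSR:EE
 * directly.  BookE parts have wrtee/wrteei, which write only the EE
 * bit; classic parts must go through a full mtmsr.
 */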
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x)   mtmsr(x)
#define local_irq_restore(flags)        __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x)   mtmsr(x)
#define local_irq_restore(flags)        mtmsr(flags)
#endif

static inline void local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
        __asm__ __volatile__("wrteei 0": : :"memory");
#else
        unsigned long msr;

        msr = mfmsr();
        SET_MSR_EE(msr & ~MSR_EE);
#endif
}

static inline void local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
        __asm__ __volatile__("wrteei 1": : :"memory");
#else
        unsigned long msr;

        msr = mfmsr();
        SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline void local_irq_save_ptr(unsigned long *flags)
{
        unsigned long msr;

        msr = mfmsr();
        *flags = msr;
#ifdef CONFIG_BOOKE
        __asm__ __volatile__("wrteei 0": : :"memory");
#else
        SET_MSR_EE(msr & ~MSR_EE);
#endif
}

#define local_save_flags(flags) ((flags) = mfmsr())
#define local_irq_save(flags)   local_irq_save_ptr(&flags)
#define irqs_disabled()         ((mfmsr() & MSR_EE) == 0)
#define hard_irq_enable()       local_irq_enable()
#define hard_irq_disable()      local_irq_disable()

static inline int irqs_disabled_flags(unsigned long flags)
{
        return (flags & MSR_EE) == 0;
}
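
/*
 * Note the asymmetry with 64-bit: here the saved flags are a raw MSR
 * image and "disabled" means MSR:EE is clear, while on 64-bit the flags
 * are the soft_enabled byte and "disabled" means it is zero.
 */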

#endif /* CONFIG_PPC64 */

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;

#ifdef CONFIG_PERF_COUNTERS
#ifdef CONFIG_PPC64
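/*
 * perf_counter_pending is another paca byte, used much like soft_enabled
 * above: it flags deferred perf-counter work that the soft-enable path
 * checks for and processes.  Keeping it in the paca lets that test be a
 * single load through r13.
 */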
static inline unsigned long test_perf_counter_pending(void)
{
        unsigned long x;

        asm volatile("lbz %0,%1(13)"
                : "=r" (x)
                : "i" (offsetof(struct paca_struct, perf_counter_pending)));
        return x;
}

static inline void set_perf_counter_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (1),
                "i" (offsetof(struct paca_struct, perf_counter_pending)));
}

static inline void clear_perf_counter_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (0),
                "i" (offsetof(struct paca_struct, perf_counter_pending)));
}
#endif /* CONFIG_PPC64 */

#else /* CONFIG_PERF_COUNTERS */

static inline unsigned long test_perf_counter_pending(void)
{
        return 0;
}

/* Stub the setter too, so callers build regardless of CONFIG_PERF_COUNTERS */
static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}

#endif /* CONFIG_PERF_COUNTERS */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */