hw_irq.h

/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

extern void timer_interrupt(struct pt_regs *);

#ifdef CONFIG_PPC64
#include <asm/paca.h>
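
/*
 * On 64-bit, interrupt disabling is "lazy": disabling only clears the
 * soft_enabled byte in the per-CPU paca (r13 always holds the paca
 * pointer in the kernel), leaving MSR_EE alone.  A hardware interrupt
 * that arrives while soft-disabled is noted and replayed when
 * interrupts are re-enabled.  local_get_flags() just reads that
 * soft_enabled byte.
 */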
static inline unsigned long local_get_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (flags)
	: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}
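
/*
 * Clear soft_enabled and return its previous value.  Note that this
 * does not touch MSR_EE: a hardware interrupt can still occur, but the
 * exception entry code will see soft_enabled == 0 and defer handling.
 * The "memory" clobber keeps the compiler from moving memory accesses
 * across the disable.
 */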
static inline unsigned long raw_local_irq_disable(void)
{
	unsigned long flags, zero;

	__asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
	: "=r" (flags), "=&r" (zero)
	: "i" (offsetof(struct paca_struct, soft_enabled))
	: "memory");

	return flags;
}
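
/*
 * raw_local_irq_restore() lives in arch/powerpc/kernel/irq.c; besides
 * setting soft_enabled it re-enables MSR_EE as needed and replays any
 * interrupt that was deferred while we were soft-disabled.
 */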
extern void raw_local_irq_restore(unsigned long);
extern void iseries_handle_interrupts(void);

#define raw_local_irq_enable()		raw_local_irq_restore(1)
#define raw_local_save_flags(flags)	((flags) = local_get_flags())
#define raw_local_irq_save(flags)	((flags) = raw_local_irq_disable())

#define raw_irqs_disabled()		(local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags)	((flags) == 0)
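
/*
 * Typical caller pattern (an illustrative sketch, not part of this
 * file):
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);	 soft-disable, remember old state
 *	...critical section...
 *	raw_local_irq_restore(flags);	 re-enable (and replay) if it
 *					 was enabled before
 */
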
#define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
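
/*
 * hard_irq_disable() really clears MSR_EE, and records in the paca
 * that interrupts are both soft- and hard-disabled, keeping the
 * lazy-disable state consistent for a later raw_local_irq_restore().
 */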
#define hard_irq_disable()			\
	do {					\
		__hard_irq_disable();		\
		get_paca()->soft_enabled = 0;	\
		get_paca()->hard_enabled = 0;	\
	} while(0)

static inline int irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

#else
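
/*
 * 32-bit: no lazy scheme; MSR_EE is manipulated directly, so hard and
 * soft disabling are the same thing.  Book-E parts have the dedicated
 * wrtee/wrteei instructions for flipping EE; classic parts go through
 * mtmsr.
 */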
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x)	mtmsr(x)
#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x)	mtmsr(x)
#define local_irq_restore(flags)	mtmsr(flags)
#endif

static inline void local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	unsigned long msr;

	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
	SET_MSR_EE(msr & ~MSR_EE);
#endif
}

static inline void local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;

	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}
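
/*
 * Save the current MSR through the pointer, then disable interrupts.
 * The trailing empty asm with a "memory" clobber is a compiler
 * barrier, keeping critical-section accesses from being hoisted above
 * the disable.
 */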
static inline void local_irq_save_ptr(unsigned long *flags)
{
	unsigned long msr;
	msr = mfmsr();
	*flags = msr;
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	SET_MSR_EE(msr & ~MSR_EE);
#endif
	__asm__ __volatile__("": : :"memory");
}

#define local_save_flags(flags)	((flags) = mfmsr())
#define local_irq_save(flags)	local_irq_save_ptr(&flags)
#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)

#define hard_irq_enable()	local_irq_enable()
#define hard_irq_disable()	local_irq_disable()

static inline int irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

#endif /* CONFIG_PPC64 */

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */

struct irq_chip;

#ifdef CONFIG_PERF_COUNTERS
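
/*
 * perf_counter_pending is another byte in the paca (again addressed
 * via r13): the perf code sets it when counter work is pending, and it
 * is tested on the interrupt soft-enable path to decide whether
 * perf_counter_do_pending() needs to run.
 */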
static inline unsigned long test_perf_counter_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
	return x;
}

static inline void set_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}

static inline void clear_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, perf_counter_pending)));
}

extern void perf_counter_do_pending(void);

#else
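
/*
 * Stubs, so that callers need no #ifdef when CONFIG_PERF_COUNTERS is
 * not set.
 */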
static inline unsigned long test_perf_counter_pending(void)
{
	return 0;
}

static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
static inline void perf_counter_do_pending(void) {}

#endif /* CONFIG_PERF_COUNTERS */

#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */