hardirq.h

#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
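
/*
 * Worked example (illustrative, not part of the original header):
 * a preempt_count() value of 0x00010002 decodes, per the layout
 * above, as one hardirq in progress (bits 16-27 == 0x001), no
 * softirq activity (bits 8-15 == 0x00), and a preemption depth of
 * two (bits 0-7 == 0x02).
 */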
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS 12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x) ((1UL << (x))-1)

#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
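
/*
 * Expansion example (illustrative): with the defaults above,
 * SOFTIRQ_SHIFT == 8, so SOFTIRQ_MASK == __IRQ_MASK(8) << 8 ==
 * 0x0000ff00 and SOFTIRQ_OFFSET == 0x100. Adding SOFTIRQ_OFFSET to
 * the preemption counter therefore bumps the softirq count by
 * exactly one without disturbing the neighbouring fields.
 */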
#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define NMI_OFFSET (PREEMPT_ACTIVE << 1)

#if NMI_OFFSET >= 0x80000000
#error PREEMPT_ACTIVE too high!
#endif

#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
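
/*
 * Usage sketch (illustrative, not from the original header; `buf'
 * and `size' are assumed locals of the caller): code that may run
 * in either process or interrupt context commonly uses these
 * predicates to pick an allocation mode, e.g.:
 *
 *	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *	buf = kmalloc(size, gfp);
 *
 * since sleeping allocations are forbidden while in_interrupt().
 */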
/*
 * Are we in NMI context?
 */
#define in_nmi() (preempt_count() & NMI_OFFSET)

#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_INATOMIC_BASE 0
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
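
/*
 * Illustrative note (not part of the original header; `my_lock' is a
 * placeholder): with CONFIG_PREEMPT disabled, spin_lock() leaves
 * preempt_count() untouched, so after
 *
 *	spin_lock(&my_lock);
 *
 * in_atomic() can still read as 0 even though sleeping there would
 * be a bug. That is exactly the blind spot the warning above
 * describes.
 */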
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible() 0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq) barrier()
#endif
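
/*
 * Usage sketch (illustrative; `dev', its fields, and
 * release_resources() are assumed driver state, not defined here):
 * a driver tearing down can stop new work and then wait out any
 * handler still running on another CPU:
 *
 *	dev->shutting_down = 1;
 *	synchronize_irq(dev->irq);
 *	release_resources(dev);
 *
 * On UP builds synchronize_irq() degrades to a compiler barrier.
 */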
struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter() \
	do { \
		account_system_vtime(current); \
		add_preempt_count(HARDIRQ_OFFSET); \
		trace_hardirq_enter(); \
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit() \
	do { \
		trace_hardirq_exit(); \
		account_system_vtime(current); \
		sub_preempt_count(HARDIRQ_OFFSET); \
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);
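
/*
 * Flow sketch (illustrative; `handle_one_irq' stands in for whatever
 * dispatch routine an architecture uses): low-level interrupt entry
 * code typically brackets the handler like this:
 *
 *	irq_enter();
 *	handle_one_irq(irq);
 *	irq_exit();
 *
 * so that hardirq accounting, tracing, and softirq processing on
 * exit all happen in the right order.
 */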
#define nmi_enter() \
	do { \
		ftrace_nmi_enter(); \
		BUG_ON(in_nmi()); \
		add_preempt_count(NMI_OFFSET); \
		lockdep_off(); \
		rcu_nmi_enter(); \
		__irq_enter(); \
	} while (0)

#define nmi_exit() \
	do { \
		__irq_exit(); \
		rcu_nmi_exit(); \
		lockdep_on(); \
		BUG_ON(!in_nmi()); \
		sub_preempt_count(NMI_OFFSET); \
		ftrace_nmi_exit(); \
	} while (0)
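
/*
 * Usage sketch (illustrative; `do_nmi_work' is a placeholder): an
 * architecture's NMI handler wraps its body so that nmi_enter() and
 * nmi_exit() mirror each other, and in_nmi() is true in between:
 *
 *	nmi_enter();
 *	do_nmi_work(regs);
 *	nmi_exit();
 *
 * Note that nmi_exit() undoes each nmi_enter() step in reverse order.
 */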
#endif /* LINUX_HARDIRQ_H */