hardirq.h

#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/config.h>
#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	12
/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
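
/*
 * Worked example (illustrative, not part of the original header): with the
 * default layout above, a preempt_count() value of 0x00010102 decodes as
 *
 *	hardirq_count() = 0x00010102 & HARDIRQ_MASK = 0x00010000  (one hardirq)
 *	softirq_count() = 0x00010102 & SOFTIRQ_MASK = 0x00000100  (inside a softirq)
 *	preempt depth   = 0x00010102 & PREEMPT_MASK = 0x00000002  (disabled twice)
 *
 * Each nesting level adds the matching *_OFFSET (1 << *_SHIFT), which is why
 * the three counts can be read back with plain masks.
 */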

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
#else
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
#endif
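
/*
 * Illustrative note (not part of the original header): code that may sleep
 * often uses these predicates as sanity checks, e.g.
 *
 *	BUG_ON(in_interrupt());
 *
 * in_atomic() additionally covers regions that merely have preemption
 * disabled; in the CONFIG_PREEMPT && !CONFIG_PREEMPT_BKL case the unit of
 * preempt count contributed by the big kernel lock (kernel_locked()) is
 * excluded, so holding only the BKL does not register as atomic context.
 */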

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
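
/*
 * Illustrative note (not part of the original header): IRQ_EXIT_OFFSET is
 * what irq_exit() in kernel/softirq.c subtracts when a hard interrupt
 * returns. Under CONFIG_PREEMPT it is HARDIRQ_OFFSET-1, so the hardirq
 * count is dropped while one unit of ordinary preempt count remains; any
 * pending softirqs then run with preemption still disabled, and the last
 * unit is released afterwards with preempt_enable_no_resched().
 */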

#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
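
/*
 * Illustrative usage sketch (not part of the original header; dev->irq is a
 * hypothetical driver field): before freeing data that an interrupt handler
 * touches, a driver can quiesce the line and wait out in-flight handlers:
 *
 *	disable_irq_nosync(dev->irq);
 *	synchronize_irq(dev->irq);	wait for handlers running on other CPUs
 *	... now it is safe to free handler state ...
 *
 * On UP kernels there is no other CPU for a handler to be running on, so the
 * macro reduces to a compiler barrier.
 */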

#define nmi_enter()		irq_enter()
#define nmi_exit()		sub_preempt_count(HARDIRQ_OFFSET)
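
/*
 * Illustrative note (not part of the original header): nmi_enter() bumps the
 * same hardirq field, so in_irq() and in_interrupt() are also true while an
 * NMI is being handled; nmi_exit() only drops the count and skips the vtime
 * accounting that irq_enter() performs.
 */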

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_user_vtime(struct task_struct *tsk)
{
}

static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

#define irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

extern void irq_exit(void);
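
/*
 * Illustrative usage sketch (not part of the original header): architecture
 * interrupt entry code typically brackets handler dispatch with this pair,
 * roughly
 *
 *	irq_enter();
 *	__do_IRQ(irq, regs);	run the handlers registered for this line
 *	irq_exit();		may process pending softirqs
 *
 * so in_irq()/in_interrupt() hold for the whole handler and softirq work is
 * deferred until the outermost hardirq nesting level is left.
 */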

#endif /* LINUX_HARDIRQ_H */