irq.h

#ifndef __irq_h
#define __irq_h

/*
 * Please do not include this file in generic code.  There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/config.h>
#include <linux/smp.h>

#if !defined(CONFIG_S390)

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

#include <asm/irq.h>
#include <asm/ptrace.h>

/*
 * IRQ line status.
 */
#define IRQ_INPROGRESS	1	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED	2	/* IRQ disabled - do not enter! */
#define IRQ_PENDING	4	/* IRQ pending - replay on enable */
#define IRQ_REPLAY	8	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT	16	/* IRQ is being autodetected */
#define IRQ_WAITING	32	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL	64	/* IRQ level triggered */
#define IRQ_MASKED	128	/* IRQ masked - shouldn't be seen again */
#if defined(ARCH_HAS_IRQ_PER_CPU)
# define IRQ_PER_CPU	256	/* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
# define CHECK_IRQ_PER_CPU(var) 0
#endif
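
/*
 * Illustrative sketch, not part of the original header: the flags above
 * are kept as a bitmask in each descriptor's status field, and
 * CHECK_IRQ_PER_CPU() hides whether the architecture defines
 * ARCH_HAS_IRQ_PER_CPU (it is a constant 0 when the bit does not exist).
 * The helper name example_status_wants_service is hypothetical and only
 * roughly mirrors the checks the generic IRQ path performs.
 */
static inline int example_status_wants_service(unsigned int status)
{
	/*
	 * A line that is disabled, or that is already being handled and
	 * is not marked per-CPU, should not be entered again.
	 */
	if (status & IRQ_DISABLED)
		return 0;
	if ((status & IRQ_INPROGRESS) && !CHECK_IRQ_PER_CPU(status))
		return 0;
	return 1;
}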

/*
 * Interrupt controller descriptor.  This is all we need to describe
 * the low-level hardware.
 */
struct hw_interrupt_type {
	const char * typename;
	unsigned int (*startup)(unsigned int irq);
	void (*shutdown)(unsigned int irq);
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
	void (*ack)(unsigned int irq);
	void (*end)(unsigned int irq);
	void (*set_affinity)(unsigned int irq, cpumask_t dest);
	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void (*release)(unsigned int irq, void *dev_id);
#endif
};

typedef struct hw_interrupt_type hw_irq_controller;
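
/*
 * Illustrative sketch, not part of the original header: a minimal,
 * do-nothing controller an architecture could hang off a descriptor for
 * lines with no real PIC behind them, in the spirit of the no_irq_type
 * object declared near the end of this file.  Every name prefixed with
 * example_ is hypothetical, and such code would normally live in an
 * arch's .c file rather than here.
 */
static unsigned int example_startup_none(unsigned int irq)
{
	return 0;		/* report no pending interrupt */
}

static void example_noop(unsigned int irq)
{
}

static struct hw_interrupt_type example_none_irq_type = {
	.typename	= "example-none",
	.startup	= example_startup_none,
	.shutdown	= example_noop,
	.enable		= example_noop,
	.disable	= example_noop,
	.ack		= example_noop,
	.end		= example_noop,
	/* .set_affinity left NULL; callers check it before use. */
};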

/*
 * This is the "IRQ descriptor", which contains various information
 * about the irq, including what kind of hardware handling it has,
 * whether it is disabled, and so on.
 *
 * Pad this out to 32 bytes for cache and indexing reasons.
 */
typedef struct irq_desc {
	hw_irq_controller *handler;
	void *handler_data;
	struct irqaction *action;	/* IRQ action list */
	unsigned int status;		/* IRQ status */
	unsigned int depth;		/* nested irq disables */
	unsigned int irq_count;		/* For detecting broken interrupts */
	unsigned int irqs_unhandled;
	spinlock_t lock;
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
	unsigned int move_irq;		/* Flag: need to re-target intr dest */
#endif
} ____cacheline_aligned irq_desc_t;

extern irq_desc_t irq_desc [NR_IRQS];

/* Return a pointer to the irq descriptor for IRQ. */
static inline irq_desc_t *
irq_descp (int irq)
{
	return irq_desc + irq;
}
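
/*
 * Illustrative sketch, not part of the original header: how code holding
 * an irq number might look up its descriptor with irq_descp() and inspect
 * the status bits under the descriptor lock.  The function name
 * example_irq_is_disabled is hypothetical.
 */
static inline int example_irq_is_disabled(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;
	int disabled;

	spin_lock_irqsave(&desc->lock, flags);
	disabled = (desc->status & IRQ_DISABLED) != 0;
	spin_unlock_irqrestore(&desc->lock, flags);

	return disabled;
}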

#include <asm/hw_irq.h> /* the arch dependent stuff */

extern int setup_irq(unsigned int irq, struct irqaction * new);

#ifdef CONFIG_GENERIC_HARDIRQS
extern cpumask_t irq_affinity[NR_IRQS];

#ifdef CONFIG_SMP
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_affinity[irq] = mask;
}
#else
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
}
#endif

#ifdef CONFIG_SMP

#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
extern cpumask_t pending_irq_cpumask[NR_IRQS];

static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->move_irq = 1;
	pending_irq_cpumask[irq] = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}

static inline void
move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	if (likely (!desc->move_irq))
		return;

	desc->move_irq = 0;

	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	if (!desc->handler->set_affinity)
		return;

	/* note - we hold the desc->lock */
	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * If there was a valid mask to work with, please do the
	 * disable, re-program, enable sequence.  This is *not*
	 * particularly important for level-triggered interrupts, but in
	 * an edge-triggered case we might be writing the RTE while an
	 * active trigger is coming in, which could make some ioapics
	 * malfunction.  Being paranoid, I guess!
	 */
	if (unlikely(!cpus_empty(tmp))) {
		desc->handler->disable(irq);
		desc->handler->set_affinity(irq, tmp);
		desc->handler->enable(irq);
	}
	cpus_clear(pending_irq_cpumask[irq]);
}

#ifdef CONFIG_PCI_MSI
/*
 * Wonder why these are dummies?
 * For example, set_ioapic_affinity_vector() calls its
 * set_ioapic_affinity_irq() counterpart after translating the vector to
 * irq info.  We need to perform this operation on the real irq only when
 * we don't use vectors, i.e. when pci_use_vector() is false.
 */
static inline void move_irq(int irq)
{
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
}

#else // CONFIG_PCI_MSI

static inline void move_irq(int irq)
{
	move_native_irq(irq);
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif // CONFIG_PCI_MSI

#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE

#define move_irq(x)
#define move_native_irq(x)
#define set_pending_irq(x,y)
static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif // CONFIG_GENERIC_PENDING_IRQ

#else // CONFIG_SMP

#define move_irq(x)
#define move_native_irq(x)

#endif // CONFIG_SMP

extern int no_irq_affinity;
extern int noirqdebug_setup(char *str);

extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
				     struct irqaction *action);
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
			   int action_ret, struct pt_regs *regs);
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

extern void init_irq_proc(void);

#ifdef CONFIG_AUTO_IRQ_AFFINITY
extern int select_smp_affinity(unsigned int irq);
#else
static inline int
select_smp_affinity(unsigned int irq)
{
	return 1;
}
#endif

#endif /* CONFIG_GENERIC_HARDIRQS */

extern hw_irq_controller no_irq_type;	/* needed in every arch? */

#endif /* !CONFIG_S390 */

#endif /* __irq_h */