internals.h

/*
 * IRQ subsystem internal functions and variables:
 */
#include <linux/irqdesc.h>

extern int noirqdebug;

#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)

/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);

/* Set default handler: */
extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);

extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
			     unsigned long flags);
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
extern raw_spinlock_t sparse_irq_lock;

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
#ifdef CONFIG_SPARSE_IRQ
void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
#endif

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif

extern int irq_select_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);
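
/*
 * Note (added annotation): irq_end() below only exists to call the
 * deprecated irq_chip->end() callback of chips that have not been converted
 * to the new irq_data based chip functions; once
 * CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is set it turns into an empty stub.
 */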
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
static inline void irq_end(unsigned int irq, struct irq_desc *desc)
{
	if (desc->irq_data.chip && desc->irq_data.chip->end)
		desc->irq_data.chip->end(irq);
}
#else
static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
#endif

/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}
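
/*
 * Illustrative pairing (sketch, not part of the original header): callers
 * that modify descriptor state while the chip may live on a slow bus
 * (e.g. I2C/SPI) are expected to bracket the desc->lock section roughly
 * the way the setup/free paths do:
 *
 *	chip_bus_lock(desc);
 *	raw_spin_lock_irqsave(&desc->lock, flags);
 *	... update desc and queue up chip state changes ...
 *	raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	chip_bus_sync_unlock(desc);
 *
 * The bus lock hooks may sleep, which is why they sit outside the raw
 * spinlock; irq_bus_sync_unlock() is the point where the queued changes
 * get flushed out to the (slow) hardware.
 */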

/*
 * Debugging printout:
 */

#include <linux/kallsyms.h>

#define P(f) if (desc->status & f) printk("%14s set\n", #f)

static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq(): %p, ", desc->handle_irq);
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
	printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
	print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
		print_symbol("%s\n", (unsigned long)desc->action->handler);
	}

	P(IRQ_INPROGRESS);
	P(IRQ_DISABLED);
	P(IRQ_PENDING);
	P(IRQ_REPLAY);
	P(IRQ_AUTODETECT);
	P(IRQ_WAITING);
	P(IRQ_LEVEL);
	P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
	P(IRQ_PER_CPU);
#endif
	P(IRQ_NOPROBE);
	P(IRQ_NOREQUEST);
	P(IRQ_NOAUTOEN);
}

#undef P
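
/*
 * Usage sketch (illustrative only): print_irq_desc() is a debugging aid that
 * can be dropped into a suspect code path to dump the descriptor state, for
 * example behind a hypothetical guard such as:
 *
 *	if (unlikely(!noirqdebug && !desc->action))
 *		print_irq_desc(irq, desc);
 *
 * The helper only reads the descriptor and reports via printk() and
 * print_symbol(); the guard above is not taken from the kernel sources.
 */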

/* Stuff below will be cleaned up after the sparse allocator is done */

#ifdef CONFIG_SMP
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @node:	node which will be handling the cpumasks
 * @boot:	true if allocation from bootmem is required
 *
 * Allocates the affinity and pending_mask cpumasks if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	gfp_t gfp = GFP_ATOMIC;

	if (boot)
		gfp = GFP_NOWAIT;

#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return false;
	}
#endif
#endif
	return true;
}

static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}
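
/*
 * Usage sketch (illustrative; the function name below is hypothetical, not
 * part of the kernel sources): a descriptor allocator would typically pair
 * the two helpers above like this:
 *
 *	static bool setup_desc_masks(struct irq_desc *desc, int node)
 *	{
 *		if (!alloc_desc_masks(desc, node, false))
 *			return false;		// offstack mask allocation failed
 *		init_desc_masks(desc);		// default affinity: all CPUs
 *		return true;
 *	}
 */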

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures the affinity and pending_mask cpumasks are copied to the new
 * irq_desc.  If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct, so the copy is redundant.
 */
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}
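
/*
 * Usage sketch (illustrative): when a descriptor is migrated to another
 * node, the copy/free helpers are expected to be paired roughly like this
 * (the surrounding flow is illustrative, not taken from this file):
 *
 *	if (!alloc_desc_masks(new_desc, node, false))
 *		goto out_no_masks;
 *	init_copy_desc_masks(old_desc, new_desc);
 *	...
 *	free_desc_masks(old_desc, new_desc);
 *
 * With !CONFIG_CPUMASK_OFFSTACK the masks are embedded in struct irq_desc,
 * so free_cpumask_var() degenerates to a no-op and the sequence is harmless.
 */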

#else /* !CONFIG_SMP */

static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	return true;
}

static inline void init_desc_masks(struct irq_desc *desc)
{
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
}

#endif /* CONFIG_SMP */