irqdesc.c

/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
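
/*
 * Note: the default irq affinity initialized above is "all CPUs"; it is
 * the mask the core falls back to for interrupts that have not been given
 * an explicit affinity (assumption: consistent with how irq_default_affinity
 * is consumed by the affinity/setup code outside this file).
 */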

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
        .status         = IRQ_DEFAULT_INIT_FLAGS,
        .handle_irq     = handle_bad_irq,
        .depth          = 1,
        .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
        void *ptr;

        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
                           GFP_ATOMIC, node);

        /*
         * Don't overwrite if we can not get a new one;
         * init_copy_kstat_irqs() could still use the old one.
         */
        if (ptr) {
                printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
                desc->kstat_irqs = ptr;
        }
}
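
/*
 * Note: desc->kstat_irqs is an array with one counter per possible CPU
 * (nr is nr_cpu_ids for the only caller in this file); kstat_irqs_cpu()
 * at the bottom of this file indexes it directly.
 */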

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        raw_spin_lock_init(&desc->lock);
        desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
        desc->irq_data.node = node;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, node, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!alloc_desc_masks(desc, node, false)) {
                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        init_desc_masks(desc);
        arch_init_chip_data(desc, node);
}
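
/*
 * init_one_irq_desc() is static and is only reached from
 * irq_to_desc_alloc_node() below; "node" is the NUMA node the new
 * descriptor's kstat array and cpumasks are allocated on.
 */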

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
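
/*
 * Typical (hypothetical) caller pattern: a NULL return simply means no
 * descriptor has been set up for this irq number yet.
 *
 *      struct irq_desc *desc = irq_to_desc(irq);
 *
 *      if (!desc)
 *              return -EINVAL;
 */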

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        void **ptr;

        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
        if (ptr)
                radix_tree_replace_slot(ptr, desc);
}
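
/*
 * replace_irq_desc() swaps the pointer stored in an existing radix tree
 * slot, e.g. when a descriptor is re-allocated on a different node; the
 * assumption here is that callers serialize such updates the same way the
 * writers in this file do, i.e. under sparse_irq_lock.
 */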

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int node;
        int i;

        init_irq_default_affinity();

        /* initialize nr_irqs based on nr_cpu_ids */
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
        node = first_online_node;

        /* allocate based on nr_cpu_ids */
        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
                                         sizeof(int), GFP_NOWAIT, node);

        irq_desc_init.irq_data.chip = &no_irq_chip;

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
                desc[i].irq_data.node = node;
#endif
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                alloc_desc_masks(&desc[i], node, true);
                init_desc_masks(&desc[i]);
                set_irq_desc(i, &desc[i]);
        }

        return arch_early_irq_init();
}
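
/*
 * At this point the NR_IRQS_LEGACY descriptors are backed by static
 * storage and already inserted into the radix tree, so the legacy/ISA
 * interrupt range is usable before the kzalloc-based path in
 * irq_to_desc_alloc_node() below is ever taken.
 */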

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        unsigned long flags;

        if (irq >= nr_irqs) {
                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
                        irq, nr_irqs);
                return NULL;
        }

        desc = irq_to_desc(irq);
        if (desc)
                return desc;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_to_desc(irq);
        if (desc)
                goto out_unlock;

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

        printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, node);

        set_irq_desc(irq, desc);

out_unlock:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
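
/*
 * Sketch of a (hypothetical) caller that needs a descriptor for a
 * dynamically discovered interrupt on a given node:
 *
 *      struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *      if (!desc)
 *              return -ENOMEM;
 *
 * (NULL is only returned for irq >= nr_irqs; an allocation failure hits
 * the BUG_ON above instead.)
 */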

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                alloc_desc_masks(&desc[i], 0, true);
                init_desc_masks(&desc[i]);
                desc[i].kstat_irqs = kstat_irqs_all[i];
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
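
/*
 * In the !CONFIG_SPARSE_IRQ variant above, all NR_IRQS descriptors and
 * their kstat arrays live in static storage, so irq_to_desc_alloc_node()
 * degenerates into a plain bounds-checked lookup and never allocates.
 */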

void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}
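
/*
 * Example (not part of this file): a per-irq total, in the style of
 * kstat_irqs(), is just the per-CPU counters summed up:
 *
 *      unsigned int total = 0;
 *      int cpu;
 *
 *      for_each_possible_cpu(cpu)
 *              total += kstat_irqs_cpu(irq, cpu);
 */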