/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

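/*
 * The per descriptor cpumasks (affinity and, with GENERIC_PENDING_IRQ,
 * pending_mask) only exist on SMP; on UP the helpers below collapse to
 * empty stubs.
 */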
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

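/*
 * desc_set_defaults() brings a descriptor back to its pristine state:
 * dummy chip, no handler or chip data, handle_bad_irq() as flow handler
 * and the interrupt disabled (depth = 1). Used for both freshly
 * allocated and recycled descriptors.
 */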
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        desc->status = IRQ_DEFAULT_INIT_FLAGS;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
        desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

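/*
 * With CONFIG_SPARSE_IRQ the descriptors are allocated on demand and
 * kept in a radix tree, indexed by irq number. Without it, a static
 * NR_IRQS sized array is used instead (see further below).
 */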
#ifdef CONFIG_SPARSE_IRQ

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
        void *ptr;

        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
                           GFP_ATOMIC, node);

        /*
         * Don't overwrite the old pointer if we can not get a new one.
         * init_copy_kstat_irqs() could still use the old one.
         */
        if (ptr) {
                printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
                desc->kstat_irqs = ptr;
        }
}

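/*
 * The sparse descriptors live in a radix tree. Insertions and deletions
 * happen under sparse_irq_lock (see alloc_descs()/free_desc() below);
 * lookups via irq_to_desc() are not serialized.
 */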
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        void **ptr;

        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
        if (ptr)
                radix_tree_replace_slot(ptr, desc);
}

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

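/* free_masks() releases the cpumasks set up by alloc_masks(). */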
#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
        /* Temporary hack until we can switch to GFP_KERNEL */
        gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC;
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
                                        gfp, node);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);
        return desc;

err_kstat:
        kfree(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

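/*
 * free_desc() removes the descriptor from /proc and from the radix tree
 * (under sparse_irq_lock) before releasing the masks, the kstat array
 * and the descriptor itself.
 */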
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        unregister_irq_proc(irq, desc);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        delete_irq_desc(irq);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        free_masks(desc);
        kfree(desc->kstat_irqs);
        kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        unsigned long flags;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                raw_spin_lock_irqsave(&sparse_irq_lock, flags);
                irq_insert_desc(start + i, desc);
                raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        bitmap_clear(allocated_irqs, start, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return -ENOMEM;
}

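/*
 * irq_to_desc_alloc_node() returns the descriptor for @irq, allocating
 * it on @node first if necessary. -EEXIST from irq_alloc_descs() means
 * the descriptor already exists, which is fine for this caller.
 */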
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        int res = irq_alloc_descs(irq, irq, 1, node);

        if (res == -EEXIST || res == irq)
                return irq_to_desc(irq);
        return NULL;
}

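/*
 * early_irq_init() runs before any interrupts can be requested. It sets
 * up the default affinity and preallocates the number of descriptors
 * the architecture asked for via arch_probe_nr_irqs().
 */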
int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

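/*
 * Without CONFIG_SPARSE_IRQ all descriptors live in the static array
 * above, so early_irq_init() only has to wire up the static kstat
 * storage and initialize each entry in place.
 */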
int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                desc[i].kstat_irqs = kstat_irqs_all[i];
                alloc_masks(desc + i, GFP_KERNEL, node);
                desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        unsigned long flags;
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        bitmap_clear(allocated_irqs, from, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
        unsigned long flags;
        int start, ret;

        if (!cnt)
                return -EINVAL;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        ret = -ENOMEM;
        if (start >= nr_irqs)
                goto err;

        bitmap_set(allocated_irqs, start, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return alloc_descs(start, cnt, node);

err:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return ret;
}

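/*
 * Illustrative usage (not part of the original file): a caller that
 * does not need specific numbers lets the core pick a free range by
 * passing irq = -1:
 *
 *	int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *	if (irq < 0)
 *		return irq;	// -EINVAL or -ENOMEM
 *	...
 *	irq_free_descs(irq, 4);
 */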
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
        unsigned long flags;
        unsigned int start;
        int ret = 0;

        if (!cnt || (from + cnt) > nr_irqs)
                return -EINVAL;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        if (start == from)
                bitmap_set(allocated_irqs, start, cnt);
        else
                ret = -EEXIST;
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return ret;
}

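/*
 * Illustrative usage (not part of the original file): an architecture
 * with the classic 16 PC/ISA interrupts could keep that range out of
 * dynamic allocation early during boot:
 *
 *	irq_reserve_irqs(0, 16);
 */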
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, desc_node(desc));
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

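/*
 * kstat_irqs_cpu() returns the number of interrupts counted on @cpu for
 * @irq, or 0 if no descriptor exists for that irq number.
 */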
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}