handle.c

/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:  the interrupt number
 * @desc: description of the interrupt
 *
 * Handles spurious and unhandled IRQ's. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

void __init __attribute__((weak)) arch_early_irq_init(void)
{
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
        .affinity   = CPU_MASK_ALL
#endif
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        unsigned long bytes;
        char *ptr;
        int node;

        /* Compute how many bytes we need per irq and allocate them */
        bytes = nr * sizeof(unsigned int);

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
        printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);

        if (ptr)
                desc->kstat_irqs = (unsigned int *)ptr;
}

void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
{
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->cpu = cpu;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

void __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy[i];

                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < NR_IRQS; i++)
                irq_desc_ptrs[i] = NULL;

        arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= NR_IRQS) {
                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
                        irq, NR_IRQS);
                WARN_ON(1);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
                irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);

        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
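
#if 0
/*
 * Illustrative sketch (not part of the original file, names hypothetical):
 * with CONFIG_SPARSE_IRQ, descriptors are created on demand, so arch code
 * that brings up a dynamically assigned interrupt would typically make sure
 * the descriptor exists before programming the vector, roughly like this:
 */
static int example_setup_dynamic_irq(unsigned int irq, int cpu)
{
        struct irq_desc *desc;

        /* Allocates and registers the descriptor if it does not exist yet */
        desc = irq_to_desc_alloc_cpu(irq, cpu);
        if (!desc)
                return -EINVAL;         /* irq >= NR_IRQS */

        /* desc->lock, kstat_irqs and chip data are now initialized */
        return 0;
}
#endif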

#else

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};

#endif

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themselves.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name     = "none",
        .startup  = noop_ret,
        .shutdown = noop,
        .enable   = noop,
        .disable  = noop,
        .ack      = ack_bad,
        .end      = noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name     = "dummy",
        .startup  = noop_ret,
        .shutdown = noop,
        .enable   = noop,
        .disable  = noop,
        .ack      = noop,
        .mask     = noop,
        .unmask   = noop,
        .end      = noop,
};
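
#if 0
/*
 * Illustrative sketch (not part of the original file, names hypothetical):
 * a platform's interrupt controller driver normally supplies its own
 * struct irq_chip with real mask/unmask callbacks and binds it, together
 * with a flow handler, to each interrupt it owns, roughly like this:
 */
static void example_mask_irq(unsigned int irq)
{
        /* write to the controller's mask register here */
}

static void example_unmask_irq(unsigned int irq)
{
        /* write to the controller's unmask register here */
}

static struct irq_chip example_chip = {
        .name   = "example",
        .mask   = example_mask_irq,
        .unmask = example_unmask_irq,
};

static void __init example_init_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < 16; irq++)
                set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}
#endif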

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:    the interrupt number
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                ret = action->handler(irq, action->dev_id);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
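
#if 0
/*
 * Illustrative sketch (not part of the original file, names hypothetical):
 * the action chain walked above is built by drivers calling request_irq().
 * A handler on a shared line returns IRQ_NONE when its device did not
 * raise the interrupt, which is how handle_IRQ_event() can tell whether
 * anyone claimed the event, roughly like this:
 */
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
        struct example_device *dev = dev_id;

        if (!example_device_raised_irq(dev))
                return IRQ_NONE;        /* not ours, try the next action */

        example_device_handle_work(dev);
        return IRQ_HANDLED;
}

static int example_probe(struct example_device *dev)
{
        return request_irq(dev->irq, example_interrupt, IRQF_SHARED,
                           "example", dev);
}
#endif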

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq: the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        /* get new one */
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }

        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif
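
#if 0
/*
 * Illustrative sketch (not part of the original file, names hypothetical):
 * on architectures that still use __do_IRQ(), the low-level entry code
 * typically lands in an arch-specific do_IRQ() that accounts for hardirq
 * context and forwards the vector here, roughly like this:
 */
asmlinkage void example_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();
        __do_IRQ(irq);
        irq_exit();

        set_irq_regs(old_regs);
}
#endif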

void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                if (!desc)
                        continue;

                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}

#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc->kstat_irqs[cpu];
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);