irq_32.c

/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
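
/*
 * Per-CPU interrupt statistics, and a per-CPU pointer to the register
 * state saved at interrupt entry (set via set_irq_regs()).
 */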
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

        /* Mask %esp with THREAD_SIZE - 1 to get its offset within the stack. */
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
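
/*
 * Switch %esp to the given stack, call func there, then switch back.
 * The xchgl installs the new stack pointer while saving the old one in
 * %ebx, from which it is restored after func returns.
 */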
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

        /*
         * Like call_on_stack(), but passes irq and desc to the handler in
         * %eax/%edx, matching the kernel's regparm(3) calling convention.
         */
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
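
/*
 * Forget this CPU's hardirq context on CPU-down so that irq_ctx_init()
 * sets it up again if the CPU is brought back online.
 */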
void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}
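
/*
 * Run pending softirqs on this CPU's dedicated softirq stack instead of
 * on the current task's kernel stack.
 */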
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif
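
/*
 * Entered from the arch interrupt entry path (do_IRQ()).  Runs the handler
 * on the per-CPU hardirq stack when CONFIG_4KSTACKS is enabled, otherwise
 * on the current stack.  Returns false if the IRQ has no descriptor.
 */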
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/apic.h>

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                /* IRQ 2 is the 8259 cascade; leave it alone. */
                if (irq == 2)
                        continue;

                affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient.  Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif