irq_32.c

/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
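
/*
 * irq_regs points at the interrupted context's register frame while an
 * interrupt is in progress; it is accessed through the
 * get_irq_regs()/set_irq_regs() helpers.
 */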
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
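/*
 * Masking %esp with THREAD_SIZE - 1 yields the stack pointer's offset
 * within the current THREAD_SIZE-aligned stack area, i.e. the number of
 * bytes between the bottom of the area (where thread_info lives) and
 * the stack pointer.
 */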
static int check_stack_overflow(void)
{
        long sp;

        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));
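
/*
 * The layout mirrors a regular kernel stack: thread_info sits at the
 * bottom of the THREAD_SIZE-aligned area and the stack grows down from
 * the top towards it.
 */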

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
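
/*
 * Switch %esp to 'stack', call 'func', then switch back.  %ebx carries
 * the original stack pointer across the call (the "=b"/"0" operand
 * pairing), %edi holds the function pointer, and the remaining
 * call-clobbered registers are declared as clobbers.
 */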
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
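
/*
 * Run desc->handle_irq(irq, desc) on the per-CPU hardirq stack.
 * Returns 1 on success, or 0 if we are already running on the hardirq
 * stack (a nested hardirq), in which case the caller must invoke the
 * handler on the current stack itself.
 */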
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = __get_cpu_var(hardirq_ctx);

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
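
        /*
         * Like call_on_stack(), but %eax/%edx additionally carry the
         * (irq, desc) argument pair: 32-bit x86 kernels are built with
         * -mregparm=3, so the handler takes its first arguments in
         * those registers, which is why they are operands here rather
         * than clobbers.
         */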
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");

        return 1;
}

/*
 * Initialize the per-CPU hardirq and softirq stacks; the stacks
 * themselves are statically allocated per-CPU data.
 */
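/*
 * Called once per CPU during bringup; calling it again is harmless
 * because it returns early once the stacks are already set up.
 */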
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (per_cpu(hardirq_ctx, cpu))
                return;

        irqctx = &per_cpu(hardirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(hardirq_ctx, cpu) = irqctx;

        irqctx = &per_cpu(softirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(softirq_ctx, cpu) = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

void irq_ctx_exit(int cpu)
{
        per_cpu(hardirq_ctx, cpu) = NULL;
}
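
/*
 * Run pending softirqs on the dedicated per-CPU softirq stack.  Bail
 * out when called from interrupt context: pending softirqs will be
 * handled when the interrupt exits.
 */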
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = __get_cpu_var(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);

                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
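
/*
 * handle_irq() is called from the arch interrupt entry path (do_IRQ())
 * once the vector has been translated to an irq number.  Returns false
 * when no descriptor exists for the irq, so the caller can report a
 * spurious interrupt.
 */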
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}
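
/*
 * A minimal sketch of how the arch entry code is expected to drive
 * handle_irq() (simplified from the x86 do_IRQ() path; details such as
 * the vector_irq lookup and the spurious-interrupt reporting vary by
 * kernel version):
 *
 *	unsigned int do_IRQ(struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *		unsigned irq = __get_cpu_var(vector_irq)[~regs->orig_ax];
 *
 *		irq_enter();
 *		if (!handle_irq(irq, regs))
 *			printk(KERN_EMERG "No irq handler for irq %u\n", irq);
 *		irq_exit();
 *
 *		set_irq_regs(old_regs);
 *		return 1;
 *	}
 */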