irqinit_32.c

#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the Intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. The IBM designers who came up with it should be shot.
 */
static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
        outb(0, 0xF0);          /* clear the FPU "busy" latch; deasserts the FERR#-driven IRQ13 */
        if (ignore_fpu_irq || !boot_cpu_data.hard_math)
                return IRQ_NONE;
        math_error((void __user *)get_irq_regs()->ip);
        return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
        .handler = math_error_irq,
        .name = "fpu",
};
#endif

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller
 */
static struct irqaction irq2 = {
        .handler = no_action,
        .name = "cascade",
};
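
/*
 * Per-CPU table mapping interrupt vectors back to IRQ numbers; -1 means
 * "no IRQ bound to this vector". At boot only the 16 legacy ISA vectors
 * are populated. The interrupt entry path translates an incoming vector
 * with, roughly, irq = __get_cpu_var(vector_irq)[vector].
 */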
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
        [0 ... IRQ0_VECTOR - 1] = -1,
        [IRQ0_VECTOR] = 0,
        [IRQ1_VECTOR] = 1,
        [IRQ2_VECTOR] = 2,
        [IRQ3_VECTOR] = 3,
        [IRQ4_VECTOR] = 4,
        [IRQ5_VECTOR] = 5,
        [IRQ6_VECTOR] = 6,
        [IRQ7_VECTOR] = 7,
        [IRQ8_VECTOR] = 8,
        [IRQ9_VECTOR] = 9,
        [IRQ10_VECTOR] = 10,
        [IRQ11_VECTOR] = 11,
        [IRQ12_VECTOR] = 12,
        [IRQ13_VECTOR] = 13,
        [IRQ14_VECTOR] = 14,
        [IRQ15_VECTOR] = 15,
        [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};
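
/*
 * Returns 1 if any online CPU has an IRQ bound to @vector in its
 * vector_irq table, 0 otherwise. An illustrative check (SOME_VECTOR and
 * reserve_vector() are placeholders, not callers from this file):
 *
 *      if (!vector_used_by_percpu_irq(SOME_VECTOR))
 *              reserve_vector(SOME_VECTOR);
 */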
int vector_used_by_percpu_irq(unsigned int vector)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (per_cpu(vector_irq, cpu)[vector] != -1)
                        return 1;
        }

        return 0;
}
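
/*
 * Bring up the legacy PIC path: initialise the boot CPU's local APIC
 * where one is configured, program the cascaded 8259A controllers, and
 * mark the 16 legacy IRQ descriptors as disabled, level-handled "XT"
 * interrupts driven by the i8259A chip.
 */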
static void __init init_ISA_irqs(void)
{
        int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        init_bsp_APIC();
#endif
        init_8259A(0);

        /*
         * 16 old-style INTA-cycle interrupts:
         */
        for (i = 0; i < NR_IRQS_LEGACY; i++) {
                struct irq_desc *desc = irq_to_desc(i);

                desc->status = IRQ_DISABLED;
                desc->action = NULL;
                desc->depth = 1;

                set_irq_chip_and_handler_name(i, &i8259A_chip,
                                              handle_level_irq, "XT");
        }
}

/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
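
/*
 * Reserve the interrupt gates used for inter-processor interrupts:
 * reschedule, the eight TLB-invalidation vectors, the two generic
 * function-call IPIs, and the low-priority IRQ-move cleanup vector.
 * All of this compiles away on non-SMP builds.
 */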
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
        alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

        /* IPIs for invalidation */
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);

        /* IPI for generic function call */
        alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

        /* IPI for generic single function call */
        alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
                        call_function_single_interrupt);

        /* Low priority IPI to cleanup after moving an irq */
        set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
        set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
#endif /* CONFIG_SMP */
}
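
/*
 * Reserve the local-APIC interrupt gates: the APIC timer, the generic
 * platform vector, the spurious and error interrupts, and (on
 * P4-class thermal-monitoring configurations) the thermal LVT vector.
 */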
static void __init apic_intr_init(void)
{
        smp_intr_init();

#ifdef CONFIG_X86_LOCAL_APIC
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

        /* generic IPI for platform specific use */
        alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);

        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
        /* thermal monitor LVT interrupt */
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
}

#ifdef CONFIG_X86_32
/**
 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
 *
 * Description:
 *      Perform any necessary interrupt initialisation prior to setting up
 *      the "ordinary" interrupt call gates. For legacy reasons, the ISA
 *      interrupts should be initialised here if the machine emulates a PC
 *      in any way.
 **/
static void __init x86_quirk_pre_intr_init(void)
{
        if (x86_quirks->arch_pre_intr_init) {
                if (x86_quirks->arch_pre_intr_init())
                        return;
        }
        init_ISA_irqs();
}
#endif
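
/*
 * Default IRQ setup: run any pre-init quirks (32-bit) or set up the ISA
 * IRQs directly (64-bit), install an interrupt gate for every external
 * vector except the system-call vector reserved in trap_init(), register
 * the APIC/SMP vectors, hook the IRQ2 cascade when no IO-APIC is in use,
 * and finish with the 32-bit FPU-IRQ and per-CPU irq-stack setup.
 */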
void __init native_init_IRQ(void)
{
        int i;

#ifdef CONFIG_X86_32
        /* Execute any quirks before the call gates are initialised: */
        x86_quirk_pre_intr_init();
#else
        init_ISA_irqs();
#endif

        /*
         * Cover the whole vector space; no vector can escape us. (Some of
         * these will be overridden and become 'special' SMP interrupts.)
         */
        for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
#ifdef CONFIG_X86_32
                /* SYSCALL_VECTOR was reserved in trap_init. */
                if (i != SYSCALL_VECTOR)
                        set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
#else
                /* IA32_SYSCALL_VECTOR was reserved in trap_init. */
                if (i != IA32_SYSCALL_VECTOR)
                        set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
#endif
        }

        apic_intr_init();

        if (!acpi_ioapic)
                setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
        /*
         * Call quirks after the call gates are initialised (these usually
         * add the architecture-specific gates):
         */
        x86_quirk_intr_init();

        /*
         * External FPU? Set up irq13 if so, for
         * original braindamaged IBM FERR coupling.
         */
        if (boot_cpu_data.hard_math && !cpu_has_fpu)
                setup_irq(FPU_IRQ, &fpu_irq);

        irq_ctx_init(smp_processor_id());
#endif
}