/*
 * linux/arch/x86_64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
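
/*
 * Error counters reported by show_interrupts() below: irq_err_count backs
 * the "ERR:" line and irq_mis_count the "MIS:" line of /proc/interrupts;
 * both are bumped by the APIC/PIC controller code elsewhere.
 */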
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif

/*
 * Generic, controller-independent functions:
 */
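
/*
 * show_interrupts() is the seq_file ->show method behind /proc/interrupts:
 * rows 0..NR_IRQS-1 print one line per registered IRQ, and the final call
 * (i == NR_IRQS) appends the NMI/LOC/ERR summary lines.
 *
 * Illustrative row (numbers and column widths approximate):
 *   0:    1234567      89012   IO-APIC-edge  timer
 */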
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for (j=0; j<NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "CPU%d       ",j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for (j=0; j<NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ",
                                        kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %14s", irq_desc[i].handler->typename);

                seq_printf(p, "  %s", action->name);
                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
                seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
                seq_putc(p, '\n');
#endif
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif
        }
        return 0;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
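/*
 * The interrupt entry stubs stash the IRQ number in orig_rax (biased so the
 * stored value is negative and cannot be mistaken for a syscall number);
 * masking with 0xff recovers the IRQ, while the high bits matter only to
 * the ret_from_ paths in entry.S.
 */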
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
        /* high bits used in ret_from_ code */
        unsigned irq = regs->orig_rax & 0xff;

        irq_enter();
        BUG_ON(irq > 256);
        __do_IRQ(irq, regs);
        irq_exit();

        return 1;
}
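
/*
 * fixup_irqs() is called during CPU hot-unplug with the mask of CPUs that
 * remain online: any IRQ whose affinity no longer intersects that mask is
 * re-targeted (IRQ 2, the i8259 cascade, is left alone).
 */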
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
                if (irq == 2)
                        continue;

                cpus_and(mask, irq_affinity[irq], map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].handler->set_affinity)
                        irq_desc[irq].handler->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
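
/*
 * call_softirq (assembly, entry.S) switches to the per-CPU interrupt stack
 * and runs __do_softirq() there; do_softirq() below is the architecture's
 * process-context entry point for flushing pending softirqs.
 */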
extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        pending = local_softirq_pending();
        /* Switch to interrupt stack */
        if (pending)
                call_softirq();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);