irq_64.c

/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/apic.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
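
/*
 * Descriptive note: irq_stat is the per-CPU irq_cpustat_t consulted by the
 * hardirq/softirq accounting helpers, and irq_regs caches the pt_regs of the
 * interrupt currently being serviced so that get_irq_regs()/set_irq_regs()
 * work on x86_64.
 */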

/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);

        WARN_ONCE(regs->sp >= curbase &&
                  regs->sp <= curbase + THREAD_SIZE &&
                  regs->sp <  curbase + sizeof(struct thread_info) +
                              sizeof(struct pt_regs) + 128,
                  "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
                  current->comm, curbase, regs->sp);
#endif
}
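
/*
 * Descriptive note: handle_irq() is the arch-level entry point invoked from
 * the common x86 do_IRQ() path once the vector has been translated to an irq
 * number; it returns false when no irq descriptor exists, letting the caller
 * report the unexpected interrupt.
 */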
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;

        stack_overflow_check(regs);

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        generic_handle_irq_desc(irq, desc);
        return true;
}
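
/*
 * Descriptive note: fixup_irqs() below walks every irq descriptor after a
 * CPU has gone offline. Irq 2 is skipped because it is the i8259 cascade;
 * each remaining irq whose affinity no longer intersects the online CPUs is
 * masked, retargeted (falling back to cpu_all_mask), and unmasked again.
 */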
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                spin_lock(&desc->lock);

                affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
                        continue;
                }

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

                if (desc->chip->mask)
                        desc->chip->mask(irq);

                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (!(warned++))
                        set_affinity = 0;

                if (desc->chip->unmask)
                        desc->chip->unmask(irq);

                spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
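
/*
 * Descriptive note: call_softirq() is an assembly helper (entry_64.S) that
 * switches to the per-CPU interrupt stack and runs __do_softirq() there,
 * which is why do_softirq() below only needs to check the pending mask and
 * that it is not already in interrupt context.
 */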
extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        pending = local_softirq_pending();
        /* Switch to interrupt stack */
        if (pending) {
                call_softirq();
                WARN_ON_ONCE(softirq_count());
        }
        local_irq_restore(flags);
}