/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
  11. #include <asm/apic.h>
  12. #include <linux/cpumask.h>
  13. #include <linux/kdebug.h>
  14. #include <linux/notifier.h>
  15. #include <linux/kprobes.h>
  16. #include <linux/nmi.h>
  17. #include <linux/module.h>
  18. /* For reliability, we're prepared to waste bits here. */
  19. static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
  20. u64 hw_nmi_get_sample_period(void)
  21. {
  22. return (u64)(cpu_khz) * 1000 * 60;
  23. }
  24. #ifdef ARCH_HAS_NMI_WATCHDOG
  25. void arch_trigger_all_cpu_backtrace(void)
  26. {
  27. int i;
  28. cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
  29. printk(KERN_INFO "sending NMI to all CPUs:\n");
  30. apic->send_IPI_all(NMI_VECTOR);
  31. /* Wait for up to 10 seconds for all CPUs to do the backtrace */
  32. for (i = 0; i < 10 * 1000; i++) {
  33. if (cpumask_empty(to_cpumask(backtrace_mask)))
  34. break;
  35. mdelay(1);
  36. }
  37. }
  38. static int __kprobes
  39. arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
  40. unsigned long cmd, void *__args)
  41. {
  42. struct die_args *args = __args;
  43. struct pt_regs *regs;
  44. int cpu = smp_processor_id();
  45. switch (cmd) {
  46. case DIE_NMI:
  47. case DIE_NMI_IPI:
  48. break;
  49. default:
  50. return NOTIFY_DONE;
  51. }
  52. regs = args->regs;
  53. if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
  54. static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
  55. arch_spin_lock(&lock);
  56. printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
  57. show_regs(regs);
  58. dump_stack();
  59. arch_spin_unlock(&lock);
  60. cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
  61. return NOTIFY_STOP;
  62. }
  63. return NOTIFY_DONE;
  64. }
  65. static __read_mostly struct notifier_block backtrace_notifier = {
  66. .notifier_call = arch_trigger_all_cpu_backtrace_handler,
  67. .next = NULL,
  68. .priority = 1
  69. };
  70. static int __init register_trigger_all_cpu_backtrace(void)
  71. {
  72. register_die_notifier(&backtrace_notifier);
  73. return 0;
  74. }
  75. early_initcall(register_trigger_all_cpu_backtrace);
  76. #endif
  77. /* STUB calls to mimic old nmi_watchdog behaviour */
  78. #if defined(CONFIG_X86_LOCAL_APIC)
  79. unsigned int nmi_watchdog = NMI_NONE;
  80. EXPORT_SYMBOL(nmi_watchdog);
  81. void acpi_nmi_enable(void) { return; }
  82. void acpi_nmi_disable(void) { return; }
  83. #endif
  84. atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
  85. EXPORT_SYMBOL(nmi_active);
  86. int unknown_nmi_panic;
  87. void cpu_nmi_set_wd_enabled(void) { return; }
  88. void stop_apic_nmi_watchdog(void *unused) { return; }
  89. void setup_apic_nmi_watchdog(void *unused) { return; }
  90. int __init check_nmi_watchdog(void) { return 0; }