crash.c

/*
 * Architecture specific (x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kdebug.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/mach_apic.h>

/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;

#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;
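
/*
 * NMI handler run on every cpu other than the crashing one: it saves
 * that cpu's register state for the dump, disables its local APIC and
 * parks the cpu in a halt loop.
 */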
static int crash_nmi_callback(struct notifier_block *self,
                              unsigned long val, void *data)
{
        struct pt_regs *regs;
        int cpu;

        if (val != DIE_NMI_IPI)
                return NOTIFY_OK;

        regs = ((struct die_args *)data)->regs;
        cpu = raw_smp_processor_id();

        /*
         * Don't do anything if this handler is invoked on the crashing cpu.
         * Otherwise the system will completely hang. The crashing cpu can
         * get an NMI if the system was booted with the nmi_watchdog
         * parameter.
         */
        if (cpu == crashing_cpu)
                return NOTIFY_STOP;
        local_irq_disable();

        crash_save_cpu(regs, cpu);
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);

        /* Assume hlt works */
        for (;;)
                halt();

        return 1;       /* Not reached: the halt loop above never exits. */
}
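
/* Broadcast an NMI to every cpu except the one executing this code. */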
static void smp_send_nmi_allbutself(void)
{
        send_IPI_allbutself(NMI_VECTOR);
}

/*
 * This code is a best effort heuristic to get the
 * other cpus to stop executing. So races with
 * cpu hotplug shouldn't matter.
 */
static struct notifier_block crash_nmi_nb = {
        .notifier_call = crash_nmi_callback,
};
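
/*
 * Ask every other cpu to stop via NMI and wait up to a second for each
 * of them to acknowledge before giving up.
 */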
static void nmi_shootdown_cpus(void)
{
        unsigned long msecs;

        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
        if (register_die_notifier(&crash_nmi_nb))
                return;         /* return what? */

        /*
         * Ensure the new callback function is set before sending
         * out the NMI
         */
        wmb();

        smp_send_nmi_allbutself();

        msecs = 1000;   /* Wait at most a second for the other cpus to stop */
        while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
                mdelay(1);
                msecs--;
        }

        /* Leave the nmi callback set */
        disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
        /* There are no cpus to shootdown */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
        /*
         * This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        /* Make a note of the crashing cpu. It will be used in the NMI callback. */
        crashing_cpu = smp_processor_id();
        nmi_shootdown_cpus();

        if (cpu_has_apic)
                disable_local_APIC();

        disable_IO_APIC();

        crash_save_cpu(regs, smp_processor_id());
}