/* crash.c */
/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */
  9. #include <linux/init.h>
  10. #include <linux/types.h>
  11. #include <linux/kernel.h>
  12. #include <linux/smp.h>
  13. #include <linux/irq.h>
  14. #include <linux/reboot.h>
  15. #include <linux/kexec.h>
  16. #include <linux/irq.h>
  17. #include <linux/delay.h>
  18. #include <linux/elf.h>
  19. #include <linux/elfcore.h>
  20. #include <asm/processor.h>
  21. #include <asm/hardirq.h>
  22. #include <asm/nmi.h>
  23. #include <asm/hw_irq.h>
  24. #include <asm/apic.h>
  25. #include <mach_ipi.h>
/* Per-cpu buffers receiving each cpu's ELF register note at crash time. */
note_buf_t crash_notes[NR_CPUS];

/* Tracks which cpu is the crashing one; checked by the NMI callback. */
static int crashing_cpu;
  29. static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
  30. size_t data_len)
  31. {
  32. struct elf_note note;
  33. note.n_namesz = strlen(name) + 1;
  34. note.n_descsz = data_len;
  35. note.n_type = type;
  36. memcpy(buf, &note, sizeof(note));
  37. buf += (sizeof(note) +3)/4;
  38. memcpy(buf, name, note.n_namesz);
  39. buf += (note.n_namesz + 3)/4;
  40. memcpy(buf, data, note.n_descsz);
  41. buf += (note.n_descsz + 3)/4;
  42. return buf;
  43. }
  44. static void final_note(u32 *buf)
  45. {
  46. struct elf_note note;
  47. note.n_namesz = 0;
  48. note.n_descsz = 0;
  49. note.n_type = 0;
  50. memcpy(buf, &note, sizeof(note));
  51. }
  52. static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
  53. {
  54. struct elf_prstatus prstatus;
  55. u32 *buf;
  56. if ((cpu < 0) || (cpu >= NR_CPUS))
  57. return;
  58. /* Using ELF notes here is opportunistic.
  59. * I need a well defined structure format
  60. * for the data I pass, and I need tags
  61. * on the data to indicate what information I have
  62. * squirrelled away. ELF notes happen to provide
  63. * all of that that no need to invent something new.
  64. */
  65. buf = &crash_notes[cpu][0];
  66. memset(&prstatus, 0, sizeof(prstatus));
  67. prstatus.pr_pid = current->pid;
  68. elf_core_copy_regs(&prstatus.pr_reg, regs);
  69. buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
  70. sizeof(prstatus));
  71. final_note(buf);
  72. }
/* Capture this cpu's live register state into *regs.
 * General-purpose registers are stored straight to memory ("=m");
 * the segment registers are read through %ax, and EFLAGS via
 * pushfl/popl. EIP cannot be read directly, so the address of the
 * currently executing code is used as an approximation.
 * NOTE: statement order matters — %eax must be saved before the
 * segment reads below clobber it.
 */
static void crash_get_current_regs(struct pt_regs *regs)
{
	__asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
	__asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
	__asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
	__asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
	__asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
	__asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
	__asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
	__asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
	/* Segment registers: 16-bit moves through %ax. */
	__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
	__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
	__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
	__asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
	__asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
	regs->eip = (unsigned long)current_text_addr();
}
  90. /* CPU does not save ss and esp on stack if execution is already
  91. * running in kernel mode at the time of NMI occurrence. This code
  92. * fixes it.
  93. */
  94. static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
  95. {
  96. memcpy(newregs, oldregs, sizeof(*newregs));
  97. newregs->esp = (unsigned long)&(oldregs->esp);
  98. __asm__ __volatile__("xorl %eax, %eax;");
  99. __asm__ __volatile__ ("movw %%ss, %%ax;" :"=a"(newregs->xss));
  100. }
  101. /* We may have saved_regs from where the error came from
  102. * or it is NULL if via a direct panic().
  103. */
  104. static void crash_save_self(struct pt_regs *saved_regs)
  105. {
  106. struct pt_regs regs;
  107. int cpu;
  108. cpu = smp_processor_id();
  109. if (saved_regs)
  110. crash_setup_regs(&regs, saved_regs);
  111. else
  112. crash_get_current_regs(&regs);
  113. crash_save_this_cpu(&regs, cpu);
  114. }
#ifdef CONFIG_SMP
/* Number of other cpus that still have to check in via the crash NMI;
 * each one decrements it from crash_nmi_callback before halting. */
static atomic_t waiting_for_crash_ipi;
/* NMI handler run on the non-crashing cpus during shootdown: save this
 * cpu's register state, signal completion, then park the cpu forever.
 * Returns 1 on every path (NMI handled).
 */
static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	struct pt_regs fixed_regs;
	/* Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();
	/* In kernel mode the CPU did not push ss/esp; rebuild the frame. */
	if (!user_mode(regs)) {
		crash_setup_regs(&fixed_regs, regs);
		regs = &fixed_regs;
	}
	crash_save_this_cpu(regs, cpu);
	disable_local_APIC();
	/* Tell the crashing cpu this one is done. */
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for(;;);
	return 1;	/* not reached */
}
/*
 * By using the NMI code instead of a vector we just sneak thru the
 * word generator coming out with just what we want. AND it does
 * not matter if clustered_apic_mode is set or not.
 */
static void smp_send_nmi_allbutself(void)
{
	/* Broadcast an NMI IPI to every cpu except this one. */
	send_IPI_allbutself(APIC_DM_NMI);
}
  148. static void nmi_shootdown_cpus(void)
  149. {
  150. unsigned long msecs;
  151. atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
  152. /* Would it be better to replace the trap vector here? */
  153. set_nmi_callback(crash_nmi_callback);
  154. /* Ensure the new callback function is set before sending
  155. * out the NMI
  156. */
  157. wmb();
  158. smp_send_nmi_allbutself();
  159. msecs = 1000; /* Wait at most a second for the other cpus to stop */
  160. while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
  161. mdelay(1);
  162. msecs--;
  163. }
  164. /* Leave the nmi callback set */
  165. disable_local_APIC();
  166. }
#else
/* UP build: nothing to do. */
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif
/* Prepare the machine for a kexec crash kernel: quiesce the other
 * cpus and the interrupt hardware, then save our own register state.
 * The ordering below (shootdown, local APIC, IO-APIC, self-save) is
 * deliberate — do not reorder.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has paniced or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();
	/* Make a note of crashing cpu. Will be used in NMI callback.*/
	crashing_cpu = smp_processor_id();
	nmi_shootdown_cpus();
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
	crash_save_self(regs);
}