/* crash.c */
/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <mach_ipi.h>
/* One ELF-note buffer per possible cpu; crash_save_this_cpu() fills the
 * slot for each cpu with its NT_PRSTATUS register state at crash time. */
note_buf_t crash_notes[NR_CPUS];

/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;
  26. static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
  27. size_t data_len)
  28. {
  29. struct elf_note note;
  30. note.n_namesz = strlen(name) + 1;
  31. note.n_descsz = data_len;
  32. note.n_type = type;
  33. memcpy(buf, &note, sizeof(note));
  34. buf += (sizeof(note) +3)/4;
  35. memcpy(buf, name, note.n_namesz);
  36. buf += (note.n_namesz + 3)/4;
  37. memcpy(buf, data, note.n_descsz);
  38. buf += (note.n_descsz + 3)/4;
  39. return buf;
  40. }
  41. static void final_note(u32 *buf)
  42. {
  43. struct elf_note note;
  44. note.n_namesz = 0;
  45. note.n_descsz = 0;
  46. note.n_type = 0;
  47. memcpy(buf, &note, sizeof(note));
  48. }
  49. static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
  50. {
  51. struct elf_prstatus prstatus;
  52. u32 *buf;
  53. if ((cpu < 0) || (cpu >= NR_CPUS))
  54. return;
  55. /* Using ELF notes here is opportunistic.
  56. * I need a well defined structure format
  57. * for the data I pass, and I need tags
  58. * on the data to indicate what information I have
  59. * squirrelled away. ELF notes happen to provide
  60. * all of that that no need to invent something new.
  61. */
  62. buf = &crash_notes[cpu][0];
  63. memset(&prstatus, 0, sizeof(prstatus));
  64. prstatus.pr_pid = current->pid;
  65. elf_core_copy_regs(&prstatus.pr_reg, regs);
  66. buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
  67. sizeof(prstatus));
  68. final_note(buf);
  69. }
/* Snapshot this cpu's current registers into @regs, one asm per slot.
 *
 * Used on the panic() path where there is no trap frame to copy from.
 * Statement order matters: %eax is stored into regs->eax BEFORE the
 * segment reads below, which use %ax (the "=a" constraint) as a
 * scratch and therefore clobber %eax.  eflags is captured via
 * pushfl/popl, and eip is approximated with current_text_addr()
 * since no faulting instruction exists here.
 * NOTE(review): the esp saved is this function's own stack pointer,
 * not the caller's -- presumably close enough for a crash dump.
 */
static void crash_get_current_regs(struct pt_regs *regs)
{
	__asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
	__asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
	__asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
	__asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
	__asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
	__asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
	__asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
	__asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
	__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
	__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
	__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
	__asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
	__asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));

	regs->eip = (unsigned long)current_text_addr();
}
  87. /* CPU does not save ss and esp on stack if execution is already
  88. * running in kernel mode at the time of NMI occurrence. This code
  89. * fixes it.
  90. */
  91. static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
  92. {
  93. memcpy(newregs, oldregs, sizeof(*newregs));
  94. newregs->esp = (unsigned long)&(oldregs->esp);
  95. __asm__ __volatile__("xorl %eax, %eax;");
  96. __asm__ __volatile__ ("movw %%ss, %%ax;" :"=a"(newregs->xss));
  97. }
  98. /* We may have saved_regs from where the error came from
  99. * or it is NULL if via a direct panic().
  100. */
  101. static void crash_save_self(struct pt_regs *saved_regs)
  102. {
  103. struct pt_regs regs;
  104. int cpu;
  105. cpu = smp_processor_id();
  106. if (saved_regs)
  107. crash_setup_regs(&regs, saved_regs);
  108. else
  109. crash_get_current_regs(&regs);
  110. crash_save_this_cpu(&regs, cpu);
  111. }
  112. #ifdef CONFIG_SMP
  113. static atomic_t waiting_for_crash_ipi;
  114. static int crash_nmi_callback(struct pt_regs *regs, int cpu)
  115. {
  116. struct pt_regs fixed_regs;
  117. /* Don't do anything if this handler is invoked on crashing cpu.
  118. * Otherwise, system will completely hang. Crashing cpu can get
  119. * an NMI if system was initially booted with nmi_watchdog parameter.
  120. */
  121. if (cpu == crashing_cpu)
  122. return 1;
  123. local_irq_disable();
  124. if (!user_mode(regs)) {
  125. crash_setup_regs(&fixed_regs, regs);
  126. regs = &fixed_regs;
  127. }
  128. crash_save_this_cpu(regs, cpu);
  129. atomic_dec(&waiting_for_crash_ipi);
  130. /* Assume hlt works */
  131. halt();
  132. for(;;);
  133. return 1;
  134. }
/*
 * By using the NMI code instead of a vector we just sneak thru the
 * word generator coming out with just what we want. AND it does
 * not matter if clustered_apic_mode is set or not.
 */
static void smp_send_nmi_allbutself(void)
{
	/* Deliver an NMI IPI to every cpu except this one. */
	send_IPI_allbutself(APIC_DM_NMI);
}
  144. static void nmi_shootdown_cpus(void)
  145. {
  146. unsigned long msecs;
  147. atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
  148. /* Would it be better to replace the trap vector here? */
  149. set_nmi_callback(crash_nmi_callback);
  150. /* Ensure the new callback function is set before sending
  151. * out the NMI
  152. */
  153. wmb();
  154. smp_send_nmi_allbutself();
  155. msecs = 1000; /* Wait at most a second for the other cpus to stop */
  156. while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
  157. mdelay(1);
  158. msecs--;
  159. }
  160. /* Leave the nmi callback set */
  161. }
#else
/* Uniprocessor build: no other cpus exist, so this is a no-op. */
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif
/* Quiesce the machine so a kexec'd crash kernel can boot.
 *
 * @regs: trap frame from the fault that triggered the crash, or NULL
 *        when invoked from a direct panic() path.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has paniced or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback.*/
	crashing_cpu = smp_processor_id();
	nmi_shootdown_cpus();

	/* Finally record our own state, after the others are parked. */
	crash_save_self(regs);
}