/*
 * kexec.h - kexec support definitions for x86 (32- and 64-bit)
 */
  1. #ifndef ASM_X86__KEXEC_H
  2. #define ASM_X86__KEXEC_H
  3. #ifdef CONFIG_X86_32
  4. # define PA_CONTROL_PAGE 0
  5. # define VA_CONTROL_PAGE 1
  6. # define PA_PGD 2
  7. # define VA_PGD 3
  8. # define PA_PTE_0 4
  9. # define VA_PTE_0 5
  10. # define PA_PTE_1 6
  11. # define VA_PTE_1 7
  12. # define PA_SWAP_PAGE 8
  13. # ifdef CONFIG_X86_PAE
  14. # define PA_PMD_0 9
  15. # define VA_PMD_0 10
  16. # define PA_PMD_1 11
  17. # define VA_PMD_1 12
  18. # define PAGES_NR 13
  19. # else
  20. # define PAGES_NR 9
  21. # endif
  22. #else
  23. # define PA_CONTROL_PAGE 0
  24. # define VA_CONTROL_PAGE 1
  25. # define PA_PGD 2
  26. # define VA_PGD 3
  27. # define PA_PUD_0 4
  28. # define VA_PUD_0 5
  29. # define PA_PMD_0 6
  30. # define VA_PMD_0 7
  31. # define PA_PTE_0 8
  32. # define VA_PTE_0 9
  33. # define PA_PUD_1 10
  34. # define VA_PUD_1 11
  35. # define PA_PMD_1 12
  36. # define VA_PMD_1 13
  37. # define PA_PTE_1 14
  38. # define VA_PTE_1 15
  39. # define PA_TABLE_PAGE 16
  40. # define PAGES_NR 17
  41. #endif
  42. #ifdef CONFIG_X86_32
  43. # define KEXEC_CONTROL_CODE_MAX_SIZE 2048
  44. #endif
  45. #ifndef __ASSEMBLY__
  46. #include <linux/string.h>
  47. #include <asm/page.h>
  48. #include <asm/ptrace.h>
  49. /*
  50. * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
  51. * I.e. Maximum page that is mapped directly into kernel memory,
  52. * and kmap is not required.
  53. *
  54. * So far x86_64 is limited to 40 physical address bits.
  55. */
  56. #ifdef CONFIG_X86_32
  57. /* Maximum physical address we can use pages from */
  58. # define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
  59. /* Maximum address we can reach in physical address mode */
  60. # define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
  61. /* Maximum address we can use for the control code buffer */
  62. # define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
  63. # define KEXEC_CONTROL_PAGE_SIZE 4096
  64. /* The native architecture */
  65. # define KEXEC_ARCH KEXEC_ARCH_386
  66. /* We can also handle crash dumps from 64 bit kernel. */
  67. # define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
  68. #else
  69. /* Maximum physical address we can use pages from */
  70. # define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL)
  71. /* Maximum address we can reach in physical address mode */
  72. # define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
  73. /* Maximum address we can use for the control pages */
  74. # define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL)
  75. /* Allocate one page for the pdp and the second for the code */
  76. # define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
  77. /* The native architecture */
  78. # define KEXEC_ARCH KEXEC_ARCH_X86_64
  79. #endif
  80. /*
  81. * CPU does not save ss and sp on stack if execution is already
  82. * running in kernel mode at the time of NMI occurrence. This code
  83. * fixes it.
  84. */
  85. static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
  86. struct pt_regs *oldregs)
  87. {
  88. #ifdef CONFIG_X86_32
  89. newregs->sp = (unsigned long)&(oldregs->sp);
  90. asm volatile("xorl %%eax, %%eax\n\t"
  91. "movw %%ss, %%ax\n\t"
  92. :"=a"(newregs->ss));
  93. #endif
  94. }
  95. /*
  96. * This function is responsible for capturing register states if coming
  97. * via panic otherwise just fix up the ss and sp if coming via kernel
  98. * mode exception.
  99. */
  100. static inline void crash_setup_regs(struct pt_regs *newregs,
  101. struct pt_regs *oldregs)
  102. {
  103. if (oldregs) {
  104. memcpy(newregs, oldregs, sizeof(*newregs));
  105. crash_fixup_ss_esp(newregs, oldregs);
  106. } else {
  107. #ifdef CONFIG_X86_32
  108. asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
  109. asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
  110. asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
  111. asm volatile("movl %%esi,%0" : "=m"(newregs->si));
  112. asm volatile("movl %%edi,%0" : "=m"(newregs->di));
  113. asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
  114. asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
  115. asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
  116. asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
  117. asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
  118. asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
  119. asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
  120. asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
  121. #else
  122. asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
  123. asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
  124. asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
  125. asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
  126. asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
  127. asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
  128. asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
  129. asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
  130. asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
  131. asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
  132. asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
  133. asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
  134. asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
  135. asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
  136. asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
  137. asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
  138. asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
  139. asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
  140. asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
  141. #endif
  142. newregs->ip = (unsigned long)current_text_addr();
  143. }
  144. }
  145. #ifdef CONFIG_X86_32
  146. asmlinkage unsigned long
  147. relocate_kernel(unsigned long indirection_page,
  148. unsigned long control_page,
  149. unsigned long start_address,
  150. unsigned int has_pae,
  151. unsigned int preserve_context);
  152. #else
  153. NORET_TYPE void
  154. relocate_kernel(unsigned long indirection_page,
  155. unsigned long page_list,
  156. unsigned long start_address) ATTRIB_NORET;
  157. #endif
  158. #endif /* __ASSEMBLY__ */
  159. #endif /* ASM_X86__KEXEC_H */