machine_kexec_64.c

/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

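/*
 * Identity-map the single 2MB region containing addr, allocating the
 * intermediate PUD and PMD tables from the image's control pages as needed.
 */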
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                                unsigned long addr)
{
        pud_t *pud;
        pmd_t *pmd;
        struct page *page;
        int result = -ENOMEM;

        addr &= PMD_MASK;
        pgd += pgd_index(addr);
        if (!pgd_present(*pgd)) {
                page = kimage_alloc_control_pages(image, 0);
                if (!page)
                        goto out;
                pud = (pud_t *)page_address(page);
                memset(pud, 0, PAGE_SIZE);
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud)) {
                page = kimage_alloc_control_pages(image, 0);
                if (!page)
                        goto out;
                pmd = (pmd_t *)page_address(page);
                memset(pmd, 0, PAGE_SIZE);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
        result = 0;
out:
        return result;
}

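/*
 * Fill one PMD-level table with 2MB identity mappings covering the
 * PUD_SIZE region starting at addr.
 */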
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
        unsigned long end_addr;

        addr &= PAGE_MASK;
        end_addr = addr + PUD_SIZE;
        while (addr < end_addr) {
                set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                addr += PMD_SIZE;
        }
}

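/*
 * Populate one PUD-level table: allocate and fill a PMD table for each
 * PUD_SIZE region up to last_addr, then clear the remaining entries.
 */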
static int init_level3_page(struct kimage *image, pud_t *level3p,
                            unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + PGDIR_SIZE;
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pmd_t *level2p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level2p = (pmd_t *)page_address(page);
                init_level2_page(level2p, addr);
                set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
                addr += PUD_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pud_clear(level3p++);
                addr += PUD_SIZE;
        }
out:
        return result;
}

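/*
 * Populate the top-level (PGD) table: build a PUD table for each
 * PGDIR_SIZE region up to last_addr, then clear the remaining entries.
 */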
static int init_level4_page(struct kimage *image, pgd_t *level4p,
                            unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pud_t *level3p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level3p = (pud_t *)page_address(page);
                result = init_level3_page(image, level3p, addr, last_addr);
                if (result)
                        goto out;
                set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pgd_clear(level4p++);
                addr += PGDIR_SIZE;
        }
out:
        return result;
}

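/* Free the pages backing the transition page table. */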
static void free_transition_pgtable(struct kimage *image)
{
        free_page((unsigned long)image->arch.pud);
        free_page((unsigned long)image->arch.pmd);
        free_page((unsigned long)image->arch.pte);
}

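/*
 * In the new page table, map the kernel virtual address of relocate_kernel
 * to the physical control page (which will hold a copy of relocate_kernel),
 * so the switch to the identity-mapped page table can happen while still
 * executing at that virtual address.
 */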
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr, paddr;
        int result = -ENOMEM;

        vaddr = (unsigned long)relocate_kernel;
        paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
        pgd += pgd_index(vaddr);
        if (!pgd_present(*pgd)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        goto err;
                image->arch.pud = pud;
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, vaddr);
        if (!pud_present(*pud)) {
                pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                if (!pmd)
                        goto err;
                image->arch.pmd = pmd;
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, vaddr);
        if (!pmd_present(*pmd)) {
                pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!pte)
                        goto err;
                image->arch.pte = pte;
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        }
        pte = pte_offset_kernel(pmd, vaddr);
        set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
        return 0;
err:
        free_transition_pgtable(image);
        return result;
}

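/*
 * Build the complete kexec page table at start_pgtable: an identity mapping
 * of memory up to max_pfn, a 2MB mapping for image->start, and the
 * transition mapping for relocate_kernel.
 */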
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
        pgd_t *level4p;
        int result;

        level4p = (pgd_t *)__va(start_pgtable);
        result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
        if (result)
                return result;
        /*
         * image->start may lie outside 0 ~ max_pfn, for example when
         * jumping back to the original kernel from the kexeced kernel.
         */
        result = init_one_level2_page(image, level4p, image->start);
        if (result)
                return result;
        return init_transition_pgtable(image, level4p);
}

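/* Point IDTR at a new IDT given its base address and limit. */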
static void set_idt(void *newidt, u16 limit)
{
        struct desc_ptr curidt;

        /* x86-64 supports unaligned loads & stores */
        curidt.size = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtq %0\n"
                : : "m" (curidt)
        );
}

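/* Point GDTR at a new GDT given its base address and limit. */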
static void set_gdt(void *newgdt, u16 limit)
{
        struct desc_ptr curgdt;

        /* x86-64 supports unaligned loads & stores */
        curgdt.size = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtq %0\n"
                : : "m" (curgdt)
        );
}

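/* Reload the data segment registers with __KERNEL_DS. */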
static void load_segments(void)
{
        __asm__ __volatile__ (
                "\tmovl %0,%%ds\n"
                "\tmovl %0,%%es\n"
                "\tmovl %0,%%ss\n"
                "\tmovl %0,%%fs\n"
                "\tmovl %0,%%gs\n"
                : : "a" (__KERNEL_DS) : "memory"
        );
}

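/*
 * Prepare for kexec: set up the identity-mapped page table in the image's
 * control pages.
 */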
int machine_kexec_prepare(struct kimage *image)
{
        unsigned long start_pgtable;
        int result;

        /* Calculate the offsets */
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

        /* Set up the identity-mapped 64-bit page table */
        result = init_pgtable(image, start_pgtable);
        if (result)
                return result;

        return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
        free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
        unsigned long page_list[PAGES_NR];
        void *control_page;
        int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context)
                save_processor_state();
#endif

        save_ftrace_enabled = __ftrace_enabled_save();

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
                /*
                 * We need to put APICs in legacy mode so that we can
                 * get timer interrupts in the second kernel. The
                 * kexec/kdump paths already have calls to
                 * disable_IO_APIC() in one form or another; the kexec
                 * jump path needs one as well.
                 */
                disable_IO_APIC();
#endif
        }

        control_page = page_address(image->control_code_page) + PAGE_SIZE;
        memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
        page_list[PA_TABLE_PAGE] =
                (unsigned long)__pa(page_address(image->control_code_page));

        if (image->type == KEXEC_TYPE_DEFAULT)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
                                                << PAGE_SHIFT);

        /*
         * The segment registers are funny things, they have both a
         * visible and an invisible part. Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory. At no other time is the descriptor
         * table in memory accessed.
         *
         * I take advantage of this here by force loading the
         * segments, before I zap the gdt with an invalid value.
         */
        load_segments();
        /*
         * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

        /* now call it */
        image->start = relocate_kernel((unsigned long)image->head,
                                       (unsigned long)page_list,
                                       image->start,
                                       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context)
                restore_processor_state();
#endif

        __ftrace_enabled_restore(save_ftrace_enabled);
}

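/*
 * Export the symbols that crash dump tools need to interpret a vmcore:
 * the kernel's physical base, the top-level page table and, on NUMA
 * configurations, the node_data array.
 */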
void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(phys_base);
        VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
        VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}