/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
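
/*
 * The helpers below build the identity-mapped page table used for the
 * transition to the new kernel: init_level2_page() fills one PMD page
 * with 2MB "large" executable mappings covering a single PUD-sized
 * region, and the functions that follow allocate and wire up the
 * higher levels (PUD and PGD).
 */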
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}
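
/*
 * Fill one PUD page: for each PUD entry in range, allocate a control
 * page to hold a PMD table, populate it with init_level2_page(), and
 * clear whatever entries remain past last_addr.
 */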
static int init_level3_page(struct kimage *image, pud_t *level3p,
			    unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}
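
/*
 * Same pattern one level up: walk the PGD entries, allocating and
 * populating a PUD page for each slot that covers memory below
 * last_addr, and clearing the rest.
 */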
static int init_level4_page(struct kimage *image, pgd_t *level4p,
			    unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}
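
/*
 * Build the identity map in the control page reserved at start_pgtable,
 * covering physical memory from 0 up to end_pfn << PAGE_SHIFT.
 */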
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;

	level4p = (pgd_t *)__va(start_pgtable);
	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
}
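
/*
 * set_idt()/set_gdt() point the CPU at a caller-supplied descriptor
 * table; machine_kexec() calls them with a zero limit so that no valid
 * IDT or GDT is left behind before jumping to the relocation code.
 */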
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}
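
/*
 * Force a reload of the %ds, %es, %ss, %fs and %gs selectors with
 * __KERNEL_DS so their hidden descriptor caches are refreshed while
 * the GDT is still valid; see the comment in machine_kexec() below.
 */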
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS)
		);
}
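
/*
 * relocate_new_kernel is the relocation stub, defined in assembly
 * elsewhere in the architecture code; the typedef below mirrors its
 * argument layout so the copied stub can be called through a plain
 * function pointer.
 */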
typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
						 unsigned long control_code_buffer,
						 unsigned long start_address,
						 unsigned long pgtable) ATTRIB_NORET;

extern const unsigned char relocate_new_kernel[];
extern const unsigned long relocate_new_kernel_size;
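
/*
 * Layout of the control pages: the identity-mapped page table starts
 * at the first control page, and the relocation code is copied into
 * the page immediately after it.
 */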
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable, control_code_buffer;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	control_code_buffer = start_pgtable + PAGE_SIZE;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	/* Place the code in the reboot code buffer */
	memcpy(__va(control_code_buffer), relocate_new_kernel,
	       relocate_new_kernel_size);

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	return;
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long control_code_buffer;
	unsigned long start_pgtable;
	relocate_new_kernel_t rnk;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Calculate the offsets */
	page_list = image->head;
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	control_code_buffer = start_pgtable + PAGE_SIZE;

	/* Set the low half of the page table to my identity mapped
	 * page table for kexec.  Leave the high half pointing at the
	 * kernel pages.  Don't bother to flush the global pages
	 * as that will happen when I fully switch to my identity mapped
	 * page table anyway.
	 */
	memcpy(__va(read_cr3()), __va(start_pgtable), PAGE_SIZE/2);
	__flush_tlb();

	/* The segment registers are funny things: whenever a segment
	 * register is loaded with a selector, its hidden part is filled
	 * in from the descriptor table in memory, and that table is not
	 * consulted again until the register is loaded with a different
	 * selector.
	 *
	 * This is unlike an ordinary cache, where the behind-the-scenes
	 * contents may also be dropped and refetched at arbitrary times.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();

	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	rnk = (relocate_new_kernel_t) control_code_buffer;
	(*rnk)(page_list, control_code_buffer, image->start, start_pgtable);
}