machine_kexec.c

/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 *
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * LANDISK/sh4 supported by kogiidena
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>

typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
                                      unsigned long reboot_code_buffer,
                                      unsigned long start_address);

extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
extern void *gdb_vbr_vector;
extern void *vbr_base;
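
/*
 * Note: relocate_new_kernel[] and relocate_new_kernel_size come from the
 * architecture's assembly relocation stub (presumably relocate_kernel.S);
 * machine_kexec() copies that stub into the control code page and jumps
 * to it once the page list has been prepared.  gdb_vbr_vector and
 * vbr_base hold the exception-vector base (VBR) values switched around
 * that jump.
 */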

void machine_shutdown(void)
{
}

void machine_crash_shutdown(struct pt_regs *regs)
{
}

/*
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 */
int machine_kexec_prepare(struct kimage *image)
{
        return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

static void kexec_info(struct kimage *image)
{
        int i;

        printk("kexec information\n");
        for (i = 0; i < image->nr_segments; i++) {
                printk(" segment[%d]: 0x%08x - 0x%08x (0x%08x)\n",
                       i,
                       (unsigned int)image->segment[i].mem,
                       (unsigned int)image->segment[i].mem +
                                     image->segment[i].memsz,
                       (unsigned int)image->segment[i].memsz);
        }
        printk(" start : 0x%08x\n\n", (unsigned int)image->start);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
        unsigned long page_list;
        unsigned long reboot_code_buffer;
        relocate_new_kernel_t rnk;
        unsigned long entry;
        unsigned long *ptr;
        int save_ftrace_enabled;

        /*
         * Nicked from the mips version of machine_kexec():
         * The generic kexec code builds a page list with physical
         * addresses. Use phys_to_virt() to convert them to virtual.
         */
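        /*
         * Each entry in that list is a physical address whose low bits act
         * as type flags: IND_DESTINATION starts a new destination page,
         * IND_INDIRECTION points at the next page of entries, IND_SOURCE
         * names a page to copy, and IND_DONE terminates the list.  The
         * walk below follows indirection pages and rewrites every
         * address-carrying entry as a virtual address.
         */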
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
             ptr = (entry & IND_INDIRECTION) ?
                   phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
                if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
                    *ptr & IND_DESTINATION)
                        *ptr = (unsigned long) phys_to_virt(*ptr);
        }

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                save_processor_state();
#endif

        save_ftrace_enabled = __ftrace_enabled_save();

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        page_list = image->head;

        /* we need both effective and real address here */
        reboot_code_buffer =
                (unsigned long)page_address(image->control_code_page);

        /* copy our kernel relocation code to the control code page */
        memcpy((void *)reboot_code_buffer, relocate_new_kernel,
               relocate_new_kernel_size);

        kexec_info(image);
        flush_cache_all();
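
        /*
         * With the standard SH BIOS/gdb stub, switch VBR to the stub's
         * vector table before jumping (gdb_vbr_vector appears to point
         * 0x100 past the table base, hence the offset), presumably so
         * that any exception taken while the kernel is being overwritten
         * is handled by the stub rather than by vectors that are about
         * to disappear.
         */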
#if defined(CONFIG_SH_STANDARD_BIOS)
        asm volatile("ldc %0, vbr" :
                     : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
                     : "memory");
#endif

        /* now call it */
        rnk = (relocate_new_kernel_t) reboot_code_buffer;
        (*rnk)(page_list, reboot_code_buffer,
               (unsigned long)phys_to_virt(image->start));
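
        /*
         * In the normal case the relocation stub never returns: it copies
         * the new kernel into place and jumps to image->start.  Execution
         * only resumes here on the CONFIG_KEXEC_JUMP / preserve_context
         * path, which is why the state saved above is restored below.
         */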
#ifdef CONFIG_KEXEC_JUMP
        asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");

        if (image->preserve_context)
                restore_processor_state();

        /* Convert page list back to physical addresses, what a mess. */
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
             ptr = (*ptr & IND_INDIRECTION) ?
                   phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
                if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
                    *ptr & IND_DESTINATION)
                        *ptr = virt_to_phys(*ptr);
        }
#endif

        __ftrace_enabled_restore(save_ftrace_enabled);
}

void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
        VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}