/* relocate_kernel.S */
  1. /*
  2. * relocate_kernel.S - put the kernel image in place to boot
  3. * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
  4. *
  5. * This source code is licensed under the GNU General Public License,
  6. * Version 2. See the file COPYING for more details.
  7. */
  8. #include <linux/linkage.h>
  9. #include <asm/page.h>
  10. #include <asm/kexec.h>
  11. /*
  12. * Must be relocatable PIC code callable as a C function
  13. */
  14. #define PTR(x) (x << 3)
  15. #define PAGE_ALIGNED (1 << PAGE_SHIFT)
  16. #define PAGE_ATTR 0x63 /* _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY */
	.text
	.align PAGE_ALIGNED
	.code64
	.globl relocate_kernel
relocate_kernel:
	/* %rdi indirection_page
	 * %rsi page_list
	 * %rdx start address
	 *
	 * Entry point (PIC, C-callable). Builds two mappings of the control
	 * page in the transition page tables before handing off to
	 * relocate_new_kernel (which falls through below):
	 *   1. the control page at its kernel virtual address, and
	 *   2. the control page identity-mapped at its physical address,
	 * so execution survives the CR3 switch.
	 *
	 * Walk pattern for each of the 4 paging levels:
	 *   %r10 = bit mask selecting this level's index field of the address
	 *   %cl  = right-shift turning the masked bits into a byte offset
	 *          (index * 8); starts at (39 - 3) for the PGD level and
	 *          drops by 9 per level as the mask shifts down by 9.
	 *   %r9  = table base (from page_list) + offset = slot to fill
	 *   %r8  = physical address of next-level table, OR'd with
	 *          PAGE_ATTR (present|rw|accessed|dirty), stored into slot.
	 */

	/* map the control page at its virtual address */

	movq	$0x0000ff8000000000, %r10	/* mask: PGD index bits 47:39 */
	mov	$(39 - 3), %cl			/* bits to shift: index -> *8 offset */
	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PGD)(%rsi), %r8		/* PGD slot = pgd + offset */
	addq	%r8, %r9
	movq	PTR(PA_PUD_0)(%rsi), %r8	/* point PGD entry at PUD_0 */
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	shrq	$9, %r10			/* next level: PUD index bits 38:30 */
	sub	$9, %cl

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PUD_0)(%rsi), %r8	/* PUD slot = pud + offset */
	addq	%r8, %r9
	movq	PTR(PA_PMD_0)(%rsi), %r8	/* point PUD entry at PMD_0 */
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	shrq	$9, %r10			/* next level: PMD index bits 29:21 */
	sub	$9, %cl

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PMD_0)(%rsi), %r8	/* PMD slot = pmd + offset */
	addq	%r8, %r9
	movq	PTR(PA_PTE_0)(%rsi), %r8	/* point PMD entry at PTE_0 */
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	shrq	$9, %r10			/* last level: PTE index bits 20:12 */
	sub	$9, %cl

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PTE_0)(%rsi), %r8	/* PTE slot = pte + offset */
	addq	%r8, %r9
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8	/* map the control page itself */
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	/* identity map the control page at its physical address */

	movq	$0x0000ff8000000000, %r10	/* mask: PGD index bits 47:39 */
	mov	$(39 - 3), %cl			/* bits to shift */
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PGD)(%rsi), %r8		/* PGD slot */
	addq	%r8, %r9
	movq	PTR(PA_PUD_1)(%rsi), %r8	/* second set of tables (_1) so the
						 * identity walk never collides with
						 * the virtual-address walk above */
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	shrq	$9, %r10
	sub	$9, %cl

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PUD_1)(%rsi), %r8	/* PUD slot */
	addq	%r8, %r9
	movq	PTR(PA_PMD_1)(%rsi), %r8
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	shrq	$9, %r10
	sub	$9, %cl

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PMD_1)(%rsi), %r8	/* PMD slot */
	addq	%r8, %r9
	movq	PTR(PA_PTE_1)(%rsi), %r8
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)

	shrq	$9, %r10
	sub	$9, %cl

	movq	%r11, %r9
	andq	%r10, %r9
	shrq	%cl, %r9

	movq	PTR(VA_PTE_1)(%rsi), %r8	/* PTE slot */
	addq	%r8, %r9
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8	/* identity-map the control page */
	orq	$PAGE_ATTR, %r8
	movq	%r8, (%r9)
relocate_new_kernel:
	/* %rdi indirection_page
	 * %rsi page_list
	 * %rdx start address
	 *
	 * Falls through from relocate_kernel above. Switches onto the
	 * transition page tables and jumps to the identity-mapped copy of
	 * this code inside the control page. From that point on, nothing
	 * from the old kernel mapping may be relied upon.
	 */

	/* zero out flags, and disable interrupts */
	pushq $0
	popfq

	/* get physical address of control page now */
	/* this is impossible after page table switch */
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8

	/* get physical address of page table now too */
	/* (kept in %rcx across the CR3 switch; loaded into CR3 again
	 * later from identity_mapped) */
	movq	PTR(PA_TABLE_PAGE)(%rsi), %rcx

	/* switch to new set of page tables */
	movq	PTR(PA_PGD)(%rsi), %r9
	movq	%r9, %cr3

	/* setup a new stack at the end of the physical control page */
	lea	4096(%r8), %rsp

	/* jump to identity mapped page */
	/* push/ret instead of an absolute jmp keeps this code
	 * position-independent */
	addq	$(identity_mapped - relocate_kernel), %r8
	pushq	%r8
	ret
  132. identity_mapped:
  133. /* store the start address on the stack */
  134. pushq %rdx
  135. /* Set cr0 to a known state:
  136. * 31 1 == Paging enabled
  137. * 18 0 == Alignment check disabled
  138. * 16 0 == Write protect disabled
  139. * 3 0 == No task switch
  140. * 2 0 == Don't do FP software emulation.
  141. * 0 1 == Proctected mode enabled
  142. */
  143. movq %cr0, %rax
  144. andq $~((1<<18)|(1<<16)|(1<<3)|(1<<2)), %rax
  145. orl $((1<<31)|(1<<0)), %eax
  146. movq %rax, %cr0
  147. /* Set cr4 to a known state:
  148. * 10 0 == xmm exceptions disabled
  149. * 9 0 == xmm registers instructions disabled
  150. * 8 0 == performance monitoring counter disabled
  151. * 7 0 == page global disabled
  152. * 6 0 == machine check exceptions disabled
  153. * 5 1 == physical address extension enabled
  154. * 4 0 == page size extensions disabled
  155. * 3 0 == Debug extensions disabled
  156. * 2 0 == Time stamp disable (disabled)
  157. * 1 0 == Protected mode virtual interrupts disabled
  158. * 0 0 == VME disabled
  159. */
  160. movq $((1<<5)), %rax
  161. movq %rax, %cr4
  162. jmp 1f
  163. 1:
  164. /* Switch to the identity mapped page tables,
  165. * and flush the TLB.
  166. */
  167. movq %rcx, %cr3
  168. /* Do the copies */
  169. movq %rdi, %rcx /* Put the page_list in %rcx */
  170. xorq %rdi, %rdi
  171. xorq %rsi, %rsi
  172. jmp 1f
  173. 0: /* top, read another word for the indirection page */
  174. movq (%rbx), %rcx
  175. addq $8, %rbx
  176. 1:
  177. testq $0x1, %rcx /* is it a destination page? */
  178. jz 2f
  179. movq %rcx, %rdi
  180. andq $0xfffffffffffff000, %rdi
  181. jmp 0b
  182. 2:
  183. testq $0x2, %rcx /* is it an indirection page? */
  184. jz 2f
  185. movq %rcx, %rbx
  186. andq $0xfffffffffffff000, %rbx
  187. jmp 0b
  188. 2:
  189. testq $0x4, %rcx /* is it the done indicator? */
  190. jz 2f
  191. jmp 3f
  192. 2:
  193. testq $0x8, %rcx /* is it the source indicator? */
  194. jz 0b /* Ignore it otherwise */
  195. movq %rcx, %rsi /* For ever source page do a copy */
  196. andq $0xfffffffffffff000, %rsi
  197. movq $512, %rcx
  198. rep ; movsq
  199. jmp 0b
  200. 3:
  201. /* To be certain of avoiding problems with self-modifying code
  202. * I need to execute a serializing instruction here.
  203. * So I flush the TLB by reloading %cr3 here, it's handy,
  204. * and not processor dependent.
  205. */
  206. movq %cr3, %rax
  207. movq %rax, %cr3
  208. /* set all of the registers to known values */
  209. /* leave %rsp alone */
  210. xorq %rax, %rax
  211. xorq %rbx, %rbx
  212. xorq %rcx, %rcx
  213. xorq %rdx, %rdx
  214. xorq %rsi, %rsi
  215. xorq %rdi, %rdi
  216. xorq %rbp, %rbp
  217. xorq %r8, %r8
  218. xorq %r9, %r9
  219. xorq %r10, %r9
  220. xorq %r11, %r11
  221. xorq %r12, %r12
  222. xorq %r13, %r13
  223. xorq %r14, %r14
  224. xorq %r15, %r15
  225. ret