head_64.S

/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
        .code32
        .text

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
        .section ".text.head"
        .code32
ENTRY(startup_32)
        cld
        /* test KEEP_SEGMENTS flag to see if the bootloader is asking
         * us to not reload segments */
        testb   $(1<<6), BP_loadflags(%esi)
        jnz     1f

        cli
        movl    $(__KERNEL_DS), %eax
        movl    %eax, %ds
        movl    %eax, %es
        movl    %eax, %ss
1:
/* Calculate the delta between where we were compiled to run
 * at and where we were actually loaded at.  This can only be done
 * with a short local call on x86.  Nothing else will tell us what
 * address we are running at.  The reserved chunk of the real-mode
 * data at 0x1e4 (defined as a scratch field) is used as the stack
 * for this calculation.  Only 4 bytes are needed.
 */
        leal    (0x1e4+4)(%esi), %esp
        call    1f
1:      popl    %ebp
        subl    $1b, %ebp
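        /* %ebp now holds the address we were actually loaded at (this boot
         * code is linked at 0, so the computed delta is the load address). */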
/* setup a stack and make sure cpu supports long mode. */
        movl    $boot_stack_end, %eax
        addl    %ebp, %eax
        movl    %eax, %esp

        call    verify_cpu
        testl   %eax, %eax
        jnz     no_longmode
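        /* verify_cpu returns non-zero if the CPU lacks long-mode support. */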
/* Compute the delta between where we were compiled to run at
 * and where the code will actually run at.
 */
/* %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */
#ifdef CONFIG_RELOCATABLE
        movl    %ebp, %ebx
        addl    $(PMD_PAGE_SIZE - 1), %ebx
        andl    $PMD_PAGE_MASK, %ebx
#else
        movl    $CONFIG_PHYSICAL_START, %ebx
#endif

        /* Replace the compressed data size with the uncompressed size */
        subl    input_len(%ebp), %ebx
        movl    output_len(%ebp), %eax
        addl    %eax, %ebx
        /* Add 8 bytes for every 32K input block */
        shrl    $12, %eax
        addl    %eax, %ebx
        /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */
        addl    $(32768 + 18 + 4095), %ebx
        andl    $~4095, %ebx
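        /* %ebx now holds the 4K-aligned address to which this image will be
         * moved so in-place decompression cannot overwrite compressed data. */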
/*
 * Prepare for entering 64 bit mode
 */

        /* Load new GDT with the 64bit segments using 32bit descriptor */
        leal    gdt(%ebp), %eax
        movl    %eax, gdt+2(%ebp)
        lgdt    gdt(%ebp)
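        /* gdt+2 is the 32-bit base field of the GDT pseudo-descriptor; patch
         * it with the runtime address of gdt before loading it with lgdt. */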
        /* Enable PAE mode */
        xorl    %eax, %eax
        orl     $(X86_CR4_PAE), %eax
        movl    %eax, %cr4
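        /* PAE is a hardware prerequisite for long mode. */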
/*
 * Build early 4G boot pagetable
 */
        /* Initialize Page tables to 0 */
        leal    pgtable(%ebx), %edi
        xorl    %eax, %eax
        movl    $((4096*6)/4), %ecx
        rep     stosl

        /* Build Level 4 */
        leal    pgtable + 0(%ebx), %edi
        leal    0x1007(%edi), %eax
        movl    %eax, 0(%edi)
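        /* The single level 4 entry points at the level 3 table one page above
         * (%edi + 0x1000); the low bits 0x7 = PRESENT | RW | USER. */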
        /* Build Level 3 */
        leal    pgtable + 0x1000(%ebx), %edi
        leal    0x1007(%edi), %eax
        movl    $4, %ecx
1:      movl    %eax, 0x00(%edi)
        addl    $0x00001000, %eax
        addl    $8, %edi
        decl    %ecx
        jnz     1b
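        /* Four level 3 entries, each referencing one of the four page
         * directories that follow; each entry covers 1 GB of address space. */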
        /* Build Level 2 */
        leal    pgtable + 0x2000(%ebx), %edi
        movl    $0x00000183, %eax
        movl    $2048, %ecx
1:      movl    %eax, 0(%edi)
        addl    $0x00200000, %eax
        addl    $8, %edi
        decl    %ecx
        jnz     1b
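        /* 2048 entries x 2 MB each identity-map the first 4 GB;
         * 0x183 = PRESENT | RW | PSE (2 MB page) | GLOBAL. */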
        /* Enable the boot page tables */
        leal    pgtable(%ebx), %eax
        movl    %eax, %cr3

        /* Enable Long mode in EFER (Extended Feature Enable Register) */
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_LME, %eax
        wrmsr

        /* Setup for the jump to 64bit mode
         *
         * When the jump is performed we will be in long mode but
         * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
         * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
         * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
         * We place all of the values on our mini stack so lret can
         * be used to perform that far jump.
         */
        pushl   $__KERNEL_CS
        leal    startup_64(%ebp), %eax
        pushl   %eax

        /* Enter paged protected Mode, activating Long Mode */
        movl    $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
        movl    %eax, %cr0

        /* Jump from 32bit compatibility mode into 64bit mode. */
        lret
ENDPROC(startup_32)
no_longmode:
        /* This isn't an x86-64 CPU so hang */
1:
        hlt
        jmp     1b

#include "../../kernel/verify_cpu_64.S"

        /* Be careful here: startup_64 needs to be at a predictable
         * address so I can export it in an ELF header.  Bootloaders
         * should look at the ELF header to find this address, as
         * it may change in the future.
         */
        .code64
        .org 0x200
ENTRY(startup_64)
        /* We come here either from startup_32 or directly from a
         * 64bit bootloader.  If we come here from a bootloader we depend on
         * an identity mapped page table being provided that maps our
         * entire text+data+bss and hopefully all of memory.
         */

        /* Setup data segments. */
        xorl    %eax, %eax
        movl    %eax, %ds
        movl    %eax, %es
        movl    %eax, %ss
        movl    %eax, %fs
        movl    %eax, %gs
        lldt    %ax
        movl    $0x20, %eax
        ltr     %ax
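        /* Selector 0x20 matches the offset of the TS descriptor in the gdt
         * defined below. */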
        /* Compute the decompressed kernel start address.  It is where
         * we were loaded at, aligned to a 2M boundary. %rbp contains the
         * decompressed kernel start address.
         *
         * If it is a relocatable kernel then decompress and run the kernel
         * from the load address aligned to a 2MB boundary, otherwise
         * decompress and run the kernel from CONFIG_PHYSICAL_START
         */

        /* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
        leaq    startup_32(%rip) /* - $startup_32 */, %rbp
        addq    $(PMD_PAGE_SIZE - 1), %rbp
        andq    $PMD_PAGE_MASK, %rbp
        movq    %rbp, %rbx
#else
        movq    $CONFIG_PHYSICAL_START, %rbp
        movq    %rbp, %rbx
#endif
        /* Replace the compressed data size with the uncompressed size */
        movl    input_len(%rip), %eax
        subq    %rax, %rbx
        movl    output_len(%rip), %eax
        addq    %rax, %rbx
        /* Add 8 bytes for every 32K input block */
        shrq    $12, %rax
        addq    %rax, %rbx
        /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */
        addq    $(32768 + 18 + 4095), %rbx
        andq    $~4095, %rbx
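        /* %rbx now holds the temporary relocation address, computed exactly
         * as in the 32-bit path above. */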
/* Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
        leaq    _end_before_pgt(%rip), %r8
        leaq    _end_before_pgt(%rbx), %r9
        movq    $_end_before_pgt /* - $startup_32 */, %rcx
1:      subq    $8, %r8
        subq    $8, %r9
        movq    0(%r8), %rax
        movq    %rax, 0(%r9)
        subq    $8, %rcx
        jnz     1b
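        /* The copy runs backwards (highest address first) because the source
         * and destination ranges may overlap; copying top-down never clobbers
         * bytes that have not been copied yet. */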
/*
 * Jump to the relocated address.
 */
        leaq    relocated(%rbx), %rax
        jmp     *%rax

        .section ".text"
relocated:

/*
 * Clear BSS
 */
        xorq    %rax, %rax
        leaq    _edata(%rbx), %rdi
        leaq    _end_before_pgt(%rbx), %rcx
        subq    %rdi, %rcx
        cld
        rep
        stosb

        /* Setup the stack */
        leaq    boot_stack_end(%rip), %rsp

        /* zero EFLAGS after setting rsp */
        pushq   $0
        popfq
/*
 * Do the decompression, and jump to the new kernel..
 */
        pushq   %rsi                    # Save the real mode argument
        movq    %rsi, %rdi              # real mode address
        leaq    boot_heap(%rip), %rsi   # malloc area for uncompression
        leaq    input_data(%rip), %rdx  # input_data
        movl    input_len(%rip), %eax
        movq    %rax, %rcx              # input_len
        movq    %rbp, %r8               # output
        call    decompress_kernel
        popq    %rsi

/*
 * Jump to the decompressed kernel.
 */
        jmp     *%rbp
        .data
gdt:
        .word   gdt_end - gdt
        .long   gdt
        .word   0
        .quad   0x0000000000000000      /* NULL descriptor */
        .quad   0x00af9a000000ffff      /* __KERNEL_CS */
        .quad   0x00cf92000000ffff      /* __KERNEL_DS */
        .quad   0x0080890000000000      /* TS descriptor */
        .quad   0x0000000000000000      /* TS continued */
gdt_end:
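/* Offsets in this gdt: 0x10 = __KERNEL_CS (long-mode code segment, L bit set),
 * 0x18 = __KERNEL_DS (flat data segment), 0x20 = the TS descriptor loaded by
 * ltr above. The leading .word/.long/.word form the pseudo-descriptor used by
 * lgdt; its base field is patched at runtime in startup_32. */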
        .bss
/* Stack and heap for uncompression */
        .balign 4
boot_heap:
        .fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
        .fill BOOT_STACK_SIZE, 1, 0
boot_stack_end: