/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif
/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)    (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
        .text
        .section .text.head
        .code64
        .globl startup_64
startup_64:
        /*
         * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
         * and someone has loaded an identity-mapped page table for us.
         * These identity-mapped page tables map all of the kernel pages
         * and possibly all of memory.
         *
         * %esi holds a physical pointer to real_mode_data.
         *
         * We come here either directly from a 64bit bootloader, or from
         * arch/x86_64/boot/compressed/head.S.
         *
         * We only come here initially at boot; nothing else comes here.
         *
         * Since we may be loaded at an address different from what we were
         * compiled to run at, we first fix up the physical addresses in our
         * page tables and then reload them.
         */
        /* Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        leaq    _text(%rip), %rbp
        subq    $_text - __START_KERNEL_map, %rbp
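        /* Illustrative example (hypothetical addresses): since we run on
         * identity-mapped pages here, the RIP-relative leaq yields the
         * physical address _text is actually loaded at, while
         * $_text - __START_KERNEL_map is the physical address it was
         * compiled for. If the kernel was compiled for 0x200000 but loaded
         * at 0x1a00000, %rbp ends up holding the relocation delta
         * 0x1a00000 - 0x200000 = 0x1800000.
         */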
        /* Is the address not 2M aligned? */
        movq    %rbp, %rax
        andl    $~PMD_PAGE_MASK, %eax
        testl   %eax, %eax
        jnz     bad_address

        /* Is the address too large? */
        leaq    _text(%rip), %rdx
        movq    $PGDIR_SIZE, %rax
        cmpq    %rax, %rdx
        jae     bad_address
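        /* Added commentary: the delta in %rbp must be a multiple of the
         * 2MB large-page size, because the fixups below add it directly
         * to 2MB pmd entries; ~PMD_PAGE_MASK keeps only the low 21 bits,
         * so a nonzero result means a misaligned load address. The second
         * check keeps the load address below PGDIR_SIZE (512GB), the
         * range covered by the single identity-mapping pgd slot fixed up
         * below.
         */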
        /* Fixup the physical addresses in the page table */
        addq    %rbp, init_level4_pgt + 0(%rip)
        addq    %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
        addq    %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

        addq    %rbp, level3_ident_pgt + 0(%rip)

        addq    %rbp, level3_kernel_pgt + (510*8)(%rip)
        addq    %rbp, level3_kernel_pgt + (511*8)(%rip)

        addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)
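        /* Added commentary: each of these entries holds a physical
         * address in its high bits and permission flags in its low 12
         * bits. Adding the 2MB-aligned delta in %rbp relocates the
         * physical address without disturbing the flags, which is why
         * the alignment check above is required.
         */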
        /* Add an Identity mapping if I am above 1G */
        leaq    _text(%rip), %rdi
        andq    $PMD_PAGE_MASK, %rdi

        movq    %rdi, %rax
        shrq    $PUD_SHIFT, %rax
        andq    $(PTRS_PER_PUD - 1), %rax
        jz      ident_complete

        leaq    (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
        leaq    level3_ident_pgt(%rip), %rbx
        movq    %rdx, 0(%rbx, %rax, 8)

        movq    %rdi, %rax
        shrq    $PMD_SHIFT, %rax
        andq    $(PTRS_PER_PMD - 1), %rax
        leaq    __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
        leaq    level2_spare_pgt(%rip), %rbx
        movq    %rdx, 0(%rbx, %rax, 8)
ident_complete:
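        /* Worked example (hypothetical load address): level2_ident_pgt
         * only identity-maps the first 1GB, i.e. pud index 0. If _text
         * sits at physical 0x60000000 (1.5GB), the pud index is
         * (0x60000000 >> 30) & 511 = 1, so the code above points that
         * pud slot at level2_spare_pgt and installs a single 2MB pmd
         * entry for the page containing _text, at pmd index
         * (0x60000000 >> 21) & 511 = 256.
         */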
        /*
         * Fixup the kernel text+data virtual addresses. Note that
         * we might write invalid pmds when the kernel is relocated;
         * cleanup_highmap() fixes this up, along with the mappings
         * beyond _end.
         */
        leaq    level2_kernel_pgt(%rip), %rdi
        leaq    4096(%rdi), %r8
        /* See if it is a valid page table entry */
1:      testq   $1, 0(%rdi)
        jz      2f
        addq    %rbp, 0(%rdi)
        /* Go to the next page */
2:      addq    $8, %rdi
        cmp     %r8, %rdi
        jne     1b
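        /* Added commentary: %r8 marks the end of the 4096-byte pmd page,
         * so this loop walks all 512 8-byte entries of level2_kernel_pgt.
         * Bit 0 of an entry is _PAGE_PRESENT; only present entries are
         * relocated, the rest are skipped.
         */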
        /* Fixup phys_base */
        addq    %rbp, phys_base(%rip)

#ifdef CONFIG_X86_TRAMPOLINE
        addq    %rbp, trampoline_level4_pgt + 0(%rip)
        addq    %rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
        /* Due to ENTRY(), sometimes the empty space gets filled with
         * zeros. Better to take a jmp than to rely on the empty space
         * being filled with 0x90 (nop).
         */
        jmp     secondary_startup_64
ENTRY(secondary_startup_64)
        /*
         * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
         * and someone has loaded a mapped page table.
         *
         * %esi holds a physical pointer to real_mode_data.
         *
         * We come here either from startup_64 (using physical addresses)
         * or from trampoline.S (using virtual addresses).
         *
         * Using virtual addresses from trampoline.S removes the need
         * to have any identity-mapped pages in the kernel page table
         * after the boot processor executes this code.
         */
        /* Enable PAE mode and PGE */
        movl    $(X86_CR4_PAE | X86_CR4_PGE), %eax
        movq    %rax, %cr4

        /* Setup early boot stage 4 level pagetables. */
        movq    $(init_level4_pgt - __START_KERNEL_map), %rax
        addq    phys_base(%rip), %rax
        movq    %rax, %cr3
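        /* Added commentary: %cr3 must hold a physical address.
         * init_level4_pgt - __START_KERNEL_map is the physical address
         * the table was compiled for, and phys_base (which had the
         * relocation delta added above) turns that into the address the
         * table actually sits at.
         */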
        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax
        jmp     *%rax
1:
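        /* Added commentary: $1f is the link-time (kernel-virtual)
         * address of the label above, loaded as a 64bit immediate. The
         * indirect jmp moves %rip into the __START_KERNEL_map mapping;
         * a plain near jump is rip-relative and would have kept
         * executing from the identity-mapped low addresses.
         */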
        /* Check if nx is implemented */
        movl    $0x80000001, %eax
        cpuid
        movl    %edx, %edi

        /* Setup EFER (Extended Feature Enable Register) */
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_SCE, %eax        /* Enable System Call */
        btl     $20, %edi               /* No Execute supported? */
        jnc     1f
        btsl    $_EFER_NX, %eax
1:      wrmsr                           /* Make changes effective */
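        /* Added commentary: CPUID leaf 0x80000001 reports NX support in
         * EDX bit 20, saved in %edi across the rdmsr. EFER is read and
         * written through the EDX:EAX pair; SCE (syscall enable) is
         * bit 0 and NX is bit 11, so both btsl operations land in %eax.
         */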
        /* Setup cr0 */
#define CR0_STATE       (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
                         X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
                         X86_CR0_PG)
        movl    $CR0_STATE, %eax
        /* Make changes effective */
        movq    %rax, %cr0

        /* Setup a boot time stack */
        movq    stack_start(%rip), %rsp

        /* zero EFLAGS after setting rsp */
        pushq   $0
        popfq
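        /* Added commentary: CR0_STATE sets protected mode (PE), monitor
         * coprocessor (MP), extension type (ET), numeric error
         * reporting (NE), kernel write protection (WP), alignment mask
         * (AM) and paging (PG). EFLAGS is cleared by pushing a literal
         * zero and popping it into the flags register, which among
         * other things clears DF and IF.
         */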
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the
         * userspace addresses we are currently running at. We have to do
         * that here because in 32bit we couldn't load a 64bit linear
         * address.
         */
        lgdt    early_gdt_descr(%rip)

        /* set up data segments. actually 0 would do too */
        movl    $__KERNEL_DS, %eax
        movl    %eax, %ds
        movl    %eax, %ss
        movl    %eax, %es

        /*
         * We don't really need to load %fs or %gs, but load them anyway
         * to kill any stale realmode selectors. This allows execution
         * under VT hardware.
         */
        movl    %eax, %fs
        movl    %eax, %gs
        /* Set up %gs.
         *
         * The base of %gs always points to the bottom of the irqstack
         * union. If the stack protector canary is enabled, it is
         * located at %gs:40. Note that, on SMP, the boot CPU uses
         * the init data section until the per-cpu areas are set up.
         */
        movl    $MSR_GS_BASE, %ecx
        movq    initial_gs(%rip), %rax
        movq    %rax, %rdx
        shrq    $32, %rdx
        wrmsr
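        /* Added commentary: wrmsr takes the 64bit MSR value split
         * across EDX:EAX, so the initial_gs pointer is copied to %rdx
         * and shifted right by 32 to put its high half in %edx while
         * %eax keeps the low half.
         */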
        /* %esi is a pointer to the real mode structure with interesting
         * info; pass it to C.
         */
        movl    %esi, %edi

        /* Finally jump to run C code and to be on a real kernel address.
         * Since we are running on identity-mapped space we have to jump
         * to the full 64bit address; this is only possible as an indirect
         * jump. In addition we need to ensure %cs is set, so we make this
         * a far return.
         */
        movq    initial_code(%rip), %rax
        pushq   $0              # fake return address to stop unwinder
        pushq   $__KERNEL_CS    # set correct cs
        pushq   %rax            # target address in negative space
        lretq
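        /* Added commentary: lretq pops a new %rip and then a new %cs,
         * so the three pushes above build, from the top of the stack
         * down: the target address (initial_code), __KERNEL_CS, and a
         * zero fake return address that terminates stack unwinding.
         */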
        /* SMP bootup changes these two */
        __REFDATA
        .align  8
        ENTRY(initial_code)
        .quad   x86_64_start_kernel
        ENTRY(initial_gs)
        .quad   INIT_PER_CPU_VAR(irq_stack_union)
        __FINITDATA

        ENTRY(stack_start)
        .quad   init_thread_union+THREAD_SIZE-8
        .word   0

bad_address:
        jmp     bad_address
        .section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
        .globl early_idt_handlers
early_idt_handlers:
        i = 0
        .rept NUM_EXCEPTION_VECTORS
        movl    $i, %esi
        jmp     early_idt_handler
        i = i + 1
        .endr
#endif
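/* Added commentary: the .rept block stamps out NUM_EXCEPTION_VECTORS
 * identical stubs, one per exception vector. Each stub loads its own
 * vector number into %esi and jumps to the shared early_idt_handler,
 * which is how the common handler learns which exception fired.
 */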
ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
        cmpl    $2, early_recursion_flag(%rip)
        jz      1f
        incl    early_recursion_flag(%rip)
        GET_CR2_INTO_RCX
        movq    %rcx, %r9
        xorl    %r8d, %r8d              # zero for error code
        movl    %esi, %ecx              # get vector number
        # Test %ecx against mask of vectors that push error code.
        cmpl    $31, %ecx
        ja      0f
        movl    $1, %eax
        salq    %cl, %rax
        testl   $0x27d00, %eax
        je      0f
        popq    %r8                     # get error code
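        # Added commentary: 0x27d00 has bits 8, 10-14 and 17 set, i.e.
        # the vectors that push an error code: #DF(8), #TS(10), #NP(11),
        # #SS(12), #GP(13), #PF(14) and #AC(17). For those, the error
        # code on top of the stack is popped into %r8; everything else
        # keeps the zero set up above.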
0:      movq    0(%rsp), %rcx           # get ip
        movq    8(%rsp), %rdx           # get cs
        xorl    %eax, %eax
        leaq    early_idt_msg(%rip), %rdi
        call    early_printk
        cmpl    $2, early_recursion_flag(%rip)
        jz      1f
        call    dump_stack
#ifdef CONFIG_KALLSYMS
        leaq    early_idt_ripmsg(%rip), %rdi
        movq    0(%rsp), %rsi           # get rip again
        call    __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:      hlt
        jmp     1b
#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
        .long   0

early_idt_msg:
        .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
        .asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
        .previous
#define NEXT_PAGE(name) \
        .balign PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)                        \
        i = 0 ;                                         \
        .rept (COUNT) ;                                 \
        .quad   (START) + (i << PMD_SHIFT) + (PERM) ;   \
        i = i + 1 ;                                     \
        .endr
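/* Added commentary: each expansion emits COUNT consecutive 2MB pmd
 * entries. For example, PMDS(0, PERM, 3) would assemble to the three
 * quadwords 0x000000+PERM, 0x200000+PERM and 0x400000+PERM, i.e. an
 * identity mapping of the first 6MB.
 */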
/*
 * This default setting generates an ident mapping at address 0x100000
 * and a mapping for the kernel that precisely maps virtual address
 * 0xffffffff80000000 to physical address 0x000000. (always using
 * 2Mbyte large pages provided by PAE mode)
 */
NEXT_PAGE(init_level4_pgt)
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .org    init_level4_pgt + L4_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
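/* Added commentary: three pgd slots are populated. Slot 0 gives the
 * identity mapping used during early boot, the L4_PAGE_OFFSET slot maps
 * the same level3_ident_pgt at the direct-mapping base __PAGE_OFFSET,
 * and slot L4_START_KERNEL (511) maps the kernel text region at
 * __START_KERNEL_map. The .org directives zero-fill the slots in
 * between.
 */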
NEXT_PAGE(level3_ident_pgt)
        .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .fill   511, 8, 0

NEXT_PAGE(level3_kernel_pgt)
        .fill   L3_START_KERNEL, 8, 0
        /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
        .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
        .fill   506, 8, 0
        .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
        /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
        .fill   5, 8, 0

NEXT_PAGE(level1_fixmap_pgt)
        .fill   512, 8, 0
NEXT_PAGE(level2_ident_pgt)
        /* Since I easily can, map the first 1G.
         * Don't set NX because code runs from these pages.
         */
        PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
NEXT_PAGE(level2_kernel_pgt)
        /*
         * 512 MB kernel mapping. We spend a full page on this pagetable
         * anyway.
         *
         * The kernel code+data+bss must not be bigger than that.
         *
         * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
         *  If you want to increase this then increase MODULES_VADDR
         *  too.)
         */
        PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
                KERNEL_IMAGE_SIZE/PMD_SIZE)
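        /* Added commentary, assuming the default KERNEL_IMAGE_SIZE of
         * 512MB: with 2MB pmds this PMDS expansion emits
         * 512MB / 2MB = 256 entries mapping physical 0 upward; the rest
         * of the page is zero-padded by the next NEXT_PAGE's .balign.
         */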
NEXT_PAGE(level2_spare_pgt)
        .fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE
        .data
        .align 16
        .globl early_gdt_descr
early_gdt_descr:
        .word   GDT_ENTRIES*8-1
early_gdt_descr_base:
        .quad   INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"
        .section .bss, "aw", @nobits
        .align L1_CACHE_BYTES
ENTRY(idt_table)
        .skip IDT_ENTRIES * 16

        .section .bss.page_aligned, "aw", @nobits
        .align PAGE_SIZE
ENTRY(empty_zero_page)
        .skip PAGE_SIZE