
/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
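
/* With the default __PAGE_OFFSET (0xffff880000000000) and
 * __START_KERNEL_map (0xffffffff80000000), these evaluate to pgd slots
 * 272 and 511 and pud slots 0 and 510, matching the 511/510 comments
 * on the page tables at the end of this file.
 */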
	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
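
	/* Worked example: if the kernel was compiled to run at physical
	 * 0x1000000 but the bootloader loaded it at 0x5000000, %rbp now
	 * holds the delta 0x4000000, which is added below to every
	 * physical address stored in the initial page tables.
	 */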
	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
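
	/* Each entry patched above holds the physical address of the next
	 * page table level; adding the load delta keeps those links
	 * pointing at the tables' actual locations in memory.
	 */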

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete
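
	/* We are above 1G: _text falls outside the pud entry 0 that
	 * level3_ident_pgt pre-populates, so hook the empty
	 * level2_spare_pgt into the right pud slot and fill in the single
	 * 2M pmd entry covering _text.
	 */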
	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that we might
	 * write invalid pmds when the kernel is relocated; cleanup_highmap()
	 * fixes this up along with the mappings beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
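	/* 4096 bytes / 8 bytes per entry = the 512 pmd entries of
	 * level2_kernel_pgt; %r8 marks the end for the loop below.
	 */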
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros.  Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Set up the early boot-stage 4-level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
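
	/* phys_base was fixed up in startup_64, so the addq above formed
	 * the true physical address of init_level4_pgt regardless of
	 * where the kernel was loaded.
	 */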
	/* Ensure I am executing from virtual addresses */
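	/* $1f assembles to the label's link-time (kernel-virtual) address,
	 * so the indirect jmp lands on the __START_KERNEL_map alias of this
	 * code even if we arrived here through the identity mapping.
	 */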
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq
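
	/* pushq $0 ; popfq loads RFLAGS with zero (bit 1 reads back as 1),
	 * clearing DF and keeping interrupts masked before any C code runs.
	 */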

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running on.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
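
	/* wrmsr takes the 64-bit MSR value split across %eax (low half)
	 * and %edx (high half); initial_gs holds the address of this
	 * CPU's irq_stack_union.
	 */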

	/* %esi is a pointer to the real mode structure with interesting
	 * info; pass it to C. */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so we
	 * make this a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point.  It's called from play_dead().  Everything has
 * been set up already except the stack.  We just set up the stack here,
 * then call start_secondary().
 */
ENTRY(start_cpu0)
	movq	stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	ENTRY(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0
	__FINITDATA

bad_address:
	jmp	bad_address

	.section ".init.text","ax"
	.globl	early_idt_handlers
early_idt_handlers:
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushq	$0		# Dummy error code, to make stack frame uniform
	.endif
	pushq	$i		# 72(%rsp) Vector number
	jmp	early_idt_handler
	i = i + 1
	.endr
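
	/* Each stub above is intended to assemble to the same 9 bytes
	 * (2-byte error-code push or 2-byte nop, 2-byte vector push,
	 * 5-byte jmp), so C code can treat early_idt_handlers as an
	 * array of fixed-size entry points.
	 */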

ENTRY(early_idt_handler)
	cld

	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)

	pushq	%rax		# 64(%rsp)
	pushq	%rcx		# 56(%rsp)
	pushq	%rdx		# 48(%rsp)
	pushq	%rsi		# 40(%rsp)
	pushq	%rdi		# 32(%rsp)
	pushq	%r8		# 24(%rsp)
	pushq	%r9		# 16(%rsp)
	pushq	%r10		#  8(%rsp)
	pushq	%r11		#  0(%rsp)

	cmpl	$__KERNEL_CS,96(%rsp)
	jne	10f

	leaq	88(%rsp),%rdi	# Pointer to %rip
	call	early_fixup_exception
	andl	%eax,%eax
	jnz	20f		# Found an exception entry

10:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl	80(%rsp),%r8d	# error code
	movl	72(%rsp),%esi	# vector number
	movl	96(%rsp),%edx	# %cs
	movq	88(%rsp),%rcx	# %rip
	xorl	%eax,%eax
	leaq	early_idt_msg(%rip),%rdi
	call	early_printk
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip),%rdi
	movq	40(%rsp),%rsi	# %rip again
	call	__print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp	1b

20:	# Exception table entry found
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%rax
	addq	$16,%rsp	# drop vector number and error code
	decl	early_recursion_flag(%rip)
	INTERRUPT_RETURN

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
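
/* For instance, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 4) would emit
 * four .quad entries mapping physical 0, 2M, 4M and 6M as 2M pages.
 */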

	.data
	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping.  We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0
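
	/* Intentionally left empty: startup_64 hooks this pmd page into
	 * level3_ident_pgt when the kernel is loaded above 1G.
	 */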

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	.align L1_CACHE_BYTES
ENTRY(nmi_idt_table)
	.skip IDT_ENTRIES * 16

	__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE