
/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif
/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
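
/*
 * Illustrative arithmetic (added note, not in the original source): with
 * 4-level paging, pgd_index(x) = (x >> 39) & 511 and
 * pud_index(x) = (x >> 30) & 511.  For
 * __START_KERNEL_map = 0xffffffff80000000 that gives
 * L4_START_KERNEL = 511 and L3_START_KERNEL = 510, which match the slots
 * patched up and populated further down in this file.
 */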

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
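	/*
	 * Illustrative example (added note): _text's compile-time physical
	 * address is _text - __START_KERNEL_map.  If the kernel was linked
	 * to run at physical 0x1000000 but was loaded at 0x5000000, %rbp
	 * now holds the delta 0x4000000, which gets added to every
	 * physical address patched below.
	 */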

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address
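	/*
	 * Added note: ~PMD_PAGE_MASK keeps only the offset bits below the
	 * 2M boundary, so any nonzero remainder means the load address is
	 * not 2M aligned; and shifting right by MAX_PHYSMEM_BITS leaves a
	 * nonzero result only if the address has bits set above the
	 * supported physical address range.
	 */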

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
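	/*
	 * For reference (added note): these are exactly the entries that
	 * are initialized with compile-time physical addresses later in
	 * this file -- slot L4_START_KERNEL (511) of early_level4_pgt, the
	 * kernel and fixmap slots (510/511) of level3_kernel_pgt, and slot
	 * 506 of level2_fixmap_pgt -- so only they need the delta applied.
	 */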

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, (4096+0)(%rbx,%rax,8)
	movq	%rdx, (4096+8)(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
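	/*
	 * Summary (added note): the two pages following early_level4_pgt
	 * (the start of early_dynamic_pgts) now serve as the PUD and PMD
	 * of an identity mapping covering [_text, _end).  Each PGD/PUD
	 * slot is written twice -- the computed slot and the one after
	 * it -- so the mapping stays valid even if the kernel image
	 * happens to cross a PUD or PGD boundary.
	 */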

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that we
	 * might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f

ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
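	/*
	 * Clarifying note (added): "movq $1f, %rax" materializes the
	 * link-time address of the label below, which lives in the
	 * __START_KERNEL_map mapping, so the indirect jump moves %rip
	 * from the identity mapping into the kernel's virtual mapping.
	 */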
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
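	/*
	 * Added note: rdmsr/wrmsr move the MSR selected by %ecx through
	 * %edx:%eax, so the pair above is a read-modify-write of EFER
	 * that only flips the SCE bit and, when CPUID 0x80000001 %edx
	 * bit 20 reports NX support, the NX bit.
	 */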

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the
	 * userspace addresses we are currently running on.  We have to do
	 * that here because in 32bit mode we couldn't load a 64bit linear
	 * address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
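	/*
	 * Added note: wrmsr takes the 64bit value in %edx:%eax, so the two
	 * 32bit loads above feed the low and high halves of the initial_gs
	 * quadword into MSR_GS_BASE.
	 */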

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

	/*
	 * Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so we
	 * make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer's Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
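	# Added note: lretq pops %rip and then %cs, so the stack built above
	# is (top) target address, __KERNEL_CS, 0.  The remaining 0 is what
	# the called C code sees as its return address, which terminates
	# stack unwinding.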

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point.  It's called from play_dead().  Everything has
 * been set up already except the stack.  We just set up the stack here.
 * Then call start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
	.globl early_idt_handlers
early_idt_handlers:
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler
	i = i + 1
	.endr
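	# Added note: every stub has the same fixed size -- 2 bytes of
	# ASM_NOP2 or "pushq $0", 2 bytes of "pushq $i", and a 5 byte jmp --
	# so the early IDT setup code can install vector i by indexing
	# early_idt_handlers[] at a constant 9 byte stride.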

/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
	cld

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	addq $16,%rsp		# drop vector number and error code
	decl early_recursion_flag(%rip)
	INTERRUPT_RETURN
ENDPROC(early_idt_handler)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
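
/*
 * Illustrative expansion (added note): PMDS(0, perm, 4) emits the four
 * quadwords 0+perm, 0x200000+perm, 0x400000+perm and 0x600000+perm,
 * i.e. consecutive 2MB-page pmd entries starting at physical address
 * START.
 */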

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping.  We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
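	/*
	 * Added note: with KERNEL_IMAGE_SIZE = 512 MB and 2 MB PMDs this
	 * expands to 256 entries, filling half the page; the remaining
	 * slots are left zero by the page-alignment padding of the next
	 * NEXT_PAGE.
	 */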

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	.align L1_CACHE_BYTES
ENTRY(nmi_idt_table)
	.skip IDT_ENTRIES * 16

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE