/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif
/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
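	/* A rough worked example (illustrative values, not from this file):
	 * running identity mapped, the leaq yields the physical address of
	 * _text, while $_text - __START_KERNEL_map is the physical address
	 * the kernel was compiled for (e.g. 0x200000 with the default
	 * CONFIG_PHYSICAL_START).  If the loader placed us at 0x1000000
	 * instead, %rbp ends up holding the relocation delta
	 * 0x1000000 - 0x200000 = 0xe00000, which is then added to every
	 * physical address stored in the boot page tables below.
	 */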
	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address
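	/* Sketch of the two checks above, assuming the usual constants
	 * (~PMD_PAGE_MASK keeping the low 21 bits, PGDIR_SIZE = 2^39):
	 * the first test fails unless the load address sits on a 2MB
	 * boundary, as the large-page mappings here require -- e.g.
	 * 0x40200000 & 0x1fffff = 0 passes, 0x40230000 & 0x1fffff =
	 * 0x30000 does not.  The second fails if _text lies at or above
	 * 512GB, which would not fit under the single pgd entry reserved
	 * for the identity mapping.
	 */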
	/* Fix up the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
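	/* Where those slot numbers come from, assuming this kernel's
	 * layout (pgd index = bits 39..47 of the virtual address):
	 *   index 0   -> the identity mapping we are running on,
	 *   index 258 -> 0xffff810000000000, the direct mapping of
	 *                physical memory at __PAGE_OFFSET,
	 *   index 511 -> 0xffffffff80000000, the kernel text mapping
	 *                at __START_KERNEL_map.
	 * E.g. 0xffffffff80000000 >> 39 = 0x1ffffff and
	 * 0x1ffffff & 511 = 511.
	 */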
	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
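	/* Illustrative walk-through, assuming PUD_SHIFT = 30 and
	 * PMD_SHIFT = 21: with the kernel loaded at 0x40000000 (1G),
	 * the pud index is (0x40000000 >> 30) & 511 = 1, so slot 1 of
	 * level3_ident_pgt is pointed at level2_spare_pgt, and the pmd
	 * index is (0x40000000 >> 21) & 511 = 0, so slot 0 of the spare
	 * pmd maps the 2MB large page at 0x40000000.
	 */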
ident_complete:

	/*
	 * Fix up the kernel text+data virtual addresses. Note that
	 * when the kernel is relocated we might write invalid pmds;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
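	/* A note on the loop bounds, assuming 8-byte pmd entries: %r8 is
	 * set one page (4096 bytes) past the start of level2_kernel_pgt,
	 * so the loop visits all 4096 / 8 = 512 entries, testing bit 0
	 * (_PAGE_PRESENT) and relocating only the populated ones.
	 */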
	/* Fix up phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
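	/* How the %cr3 value is formed, under the same layout assumptions
	 * as above: init_level4_pgt - __START_KERNEL_map is the physical
	 * address the page table was linked for, and phys_base holds the
	 * relocation delta established in startup_64 (zero when not
	 * relocated), so the sum is the table's real physical address.
	 */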
	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
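	/* The NX probe above, spelled out: cpuid leaf 0x80000001 returns
	 * the extended feature flags in %edx, where bit 20 is NX.
	 * btl $20,%edi copies that bit into CF, and only if it is set do
	 * we also set EFER.NX alongside EFER.SCE before the single wrmsr
	 * makes both take effect.
	 */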
	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the
	 * userspace addresses we are currently running on. We have to do
	 * that here because in 32bit we couldn't load a 64bit linear
	 * address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* Set up data segments. Actually 0 would do too. */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs
	/*
	 * Set up a dummy PDA. This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr
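	/* wrmsr takes the 64-bit value split as %edx:%eax, hence the
	 * shrq: %eax keeps the low 32 bits of the empty_zero_page
	 * address and %rdx receives the high 32 bits, so MSR_GS_BASE
	 * ends up holding the full virtual address.
	 */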
	/* %esi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump. In addition we need to ensure %cs is set
	 * correctly, so we make this a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
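	# A sketch of the stack lretq consumes, top to bottom, given the
	# push sequence above:
	#	[rsp+0]  initial_code	-> popped into %rip
	#	[rsp+8]  __KERNEL_CS	-> popped into %cs
	#	[rsp+16] 0		-> left as the fake return address
	# so execution continues at the initial_code target with a kernel %cs.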
	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(init_rsp)
	.quad	init_thread_union+THREAD_SIZE-8

bad_address:
	jmp bad_address
	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl $i, %esi
	jmp early_idt_handler
	i = i + 1
	.endr
#endif
ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
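	# The 0x27d00 mask decoded (standard x86 exception behaviour):
	# bits 8, 10, 11, 12, 13, 14 and 17 are set, i.e. #DF, #TS, #NP,
	# #SS, #GP, #PF and #AC -- exactly the vectors that push an error
	# code, which is why %r8 is only popped when the shifted vector
	# bit lands inside the mask.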
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b
#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
.previous

.balign PAGE_SIZE
#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
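/* What one expansion looks like, assuming PMD_SHIFT = 21 (2MB steps):
 * PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 3) assembles to
 *	.quad 0x000000 + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_LARGE_EXEC
 * i.e. consecutive 2MB large-page entries starting at START.
 */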
/*
 * This default setting generates an ident mapping at address 0x100000
 * and a mapping for the kernel that precisely maps virtual address
 * 0xffffffff80000000 to physical address 0x000000. (always using
 * 2Mbyte large pages provided by PAE mode)
 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
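	/* With the constants assumed above (2MB PMD_SIZE and the 512MB
	 * image size the comment describes), that PMDS() line emits
	 * 512MB / 2MB = 256 global large-page entries; the remainder of
	 * the page is left zero by the next .balign.
	 */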
NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0

#undef PMDS
#undef NEXT_PAGE
	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif
ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

/* We need valid kernel segments for data and code in long mode too;
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also, sysret mandates a special GDT layout.
 */
	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
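	/* Decoding one entry as a sanity check (standard x86 descriptor
	 * format, not specific to this file): __KERNEL_CS =
	 * 0x00af9b000000ffff has base 0, limit 0xfffff with G=1, access
	 * byte 0x9b (present, DPL 0, executable/readable, accessed), and
	 * the 0xa flags nibble sets L=1 for 64bit code, whereas
	 * __KERNEL32_CS uses 0xc (D=1, L=0) for 32bit compatibility code.
	 */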
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE