/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .bootstrap.text
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
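
	/* Editor's note -- a hypothetical worked example, not part of the
	 * original source: suppose the kernel was compiled to run at
	 * physical 0x200000 (i.e. _text = __START_KERNEL_map + 0x200000)
	 * but the bootloader placed it at physical 0x4000000.  Then
	 * the leaq leaves %rbp = 0x4000000 (the actual load address) and
	 * the subq subtracts 0x200000 (the compiled-in physical address),
	 * leaving %rbp = 0x3e00000.  Every physical address embedded in
	 * the initial page tables is off by exactly this delta, which is
	 * what the addq fixups below add back in.
	 */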

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~LARGE_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address
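
	/* Editor's note (an assumption about intent, not original text):
	 * only the first PGD slot of the identity mapping is fixed up
	 * below, so a load address at or beyond PGDIR_SIZE -- the span of
	 * one PGD entry -- could not be identity mapped here and is
	 * rejected as a bad address.
	 */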

	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
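
	/* Editor's note, added for illustration (assuming this kernel's
	 * __PAGE_OFFSET of 0xffff810000000000): the slot numbers are PGD
	 * indices of the regions populated at boot --
	 * (0x0 >> 39) & 511 = 0 for the identity map,
	 * (0xffff810000000000 >> 39) & 511 = 258 for the direct mapping,
	 * and (0xffffffff80000000 >> 39) & 511 = 511 for the kernel map.
	 */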

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$LARGE_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:
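
	/* Editor's note -- a hypothetical example for the code above, not
	 * from the original source: if the kernel were loaded at exactly
	 * 1G (0x40000000), the PUD index (0x40000000 >> 30) & 511 = 1 is
	 * nonzero, so level2_spare_pgt is hooked into slot 1 of
	 * level3_ident_pgt, and its PMD slot (0x40000000 >> 21) & 511 = 0
	 * gets a 2M large-page entry covering the kernel's load address.
	 */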

	/* Fixup the kernel text+data virtual addresses
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
#ifdef CONFIG_ACPI_SLEEP
	addq	%rbp, wakeup_level4_pgt + 0(%rip)
	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax
	btsq	$7, %rax
	movq	%rax, %cr4
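
	/* Editor's note, added for illustration: in the architectural
	 * layout of %cr4, bit 5 is CR4.PAE and bit 7 is CR4.PGE, which is
	 * what the two btsq instructions above set.
	 */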

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
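
	/* Editor's note, added for illustration: CPUID leaf 0x80000001
	 * reports the NX capability in EDX bit 20, which is why that bit
	 * of the saved %edi is tested before setting EFER.NX above.
	 */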

	/* Setup cr0 */
#define CR0_PM		1		/* protected mode */
#define CR0_MP		(1<<1)
#define CR0_ET		(1<<4)
#define CR0_NE		(1<<5)
#define CR0_WP		(1<<16)
#define CR0_AM		(1<<18)
#define CR0_PAGING	(1<<31)
	movl	$CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq
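
	/* Editor's note (an illustrative aside, not original text): popping
	 * a zero into RFLAGS clears IF, DF and the arithmetic flags, so
	 * later code starts from a known flags state; the reserved bit 1
	 * of RFLAGS reads back as 1 regardless of what is popped.
	 */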

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up a dummy PDA. This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr
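
	/* Editor's note, added for illustration: wrmsr takes its 64bit
	 * value split across EDX:EAX, so the address is copied to %rdx and
	 * shifted right by 32 to put the high half where wrmsr expects it.
	 */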

	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump. In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
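
	/* Editor's note -- a sketch of the frame consumed by lretq above,
	 * not part of the original source:
	 *	(%rsp)	 target %rip (initial_code)
	 *	8(%rsp)	 __KERNEL_CS
	 *	16(%rsp) 0, the fake return address for the unwinder
	 * lretq pops the first two, reloading %cs and continuing at the
	 * target, with the fake return address left as the apparent caller.
	 */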

	/* SMP bootup changes these two */
	.align	8
	.globl	initial_code
initial_code:
	.quad	x86_64_start_kernel
	.globl	init_rsp
init_rsp:
	.quad	init_thread_union+THREAD_SIZE-8

bad_address:
	jmp	bad_address

ENTRY(early_idt_handler)
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)
	xorl	%eax,%eax
	movq	8(%rsp),%rsi	# get rip
	movq	(%rsp),%rdx
	movq	%cr2,%rcx
	leaq	early_idt_msg(%rip),%rdi
	call	early_printk
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip),%rdi
	movq	8(%rsp),%rsi	# get rip again
	call	__print_symbol
#endif
1:	hlt
	jmp	1b
early_recursion_flag:
	.long	0

early_idt_msg:
	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr
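
/* Editor's note -- a hypothetical expansion of the macro above, not from
 * the original source: PMDS(0, PERM, 3) would emit three 2M-page entries,
 *	.quad 0x000000 + (PERM)
 *	.quad 0x200000 + (PERM)
 *	.quad 0x400000 + (PERM)
 * since each step of i advances the mapped physical address by 1 << 21.
 */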

/*
 * This default setting generates an ident mapping at address 0x100000
 * and a mapping for the kernel that precisely maps virtual address
 * 0xffffffff80000000 to physical address 0x000000 (always using
 * 2Mbyte large pages provided by PAE mode).
 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
	/*  (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
	PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
	/* Module mapping starts here */
	.fill	(PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0

NEXT_PAGE(level2_spare_pgt)
	.fill	512,8,0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

/* We need valid kernel segments for data and code in long mode too.
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill	PAGE_SIZE / 8 - GDT_ENTRIES,8,0
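
	/* Editor's note -- an illustrative decode of one entry above, not
	 * original text: 0x00af9b000000ffff (__KERNEL_CS) unpacks as
	 * base = 0, limit = 0xfffff, access byte 0x9b (present, DPL 0,
	 * code segment, execute/read, accessed) and flags nibble 0xa
	 * (G = 1, limit in 4K pages; L = 1, 64bit mode). Base and limit
	 * are ignored in long mode, so only the attribute bits matter.
	 */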

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE