/* arch/x86_64/kernel/head.S */
  1. /*
  2. * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
  3. *
  4. * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  5. * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
  6. * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
  7. * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
  8. *
  9. * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
  10. */
  11. #include <linux/linkage.h>
  12. #include <linux/threads.h>
  13. #include <linux/init.h>
  14. #include <asm/desc.h>
  15. #include <asm/segment.h>
  16. #include <asm/page.h>
  17. #include <asm/msr.h>
  18. #include <asm/cache.h>
  19. /* we are not able to switch in one step to the final KERNEL ADRESS SPACE
  20. * because we need identity-mapped pages on setup so define __START_KERNEL to
  21. * 0x100000 for this stage
  22. *
  23. */
  24. .text
  25. .code32
  26. .globl startup_32
  27. /* %bx: 1 if coming from smp trampoline on secondary cpu */
  28. startup_32:
  29. /*
  30. * At this point the CPU runs in 32bit protected mode (CS.D = 1) with
  31. * paging disabled and the point of this file is to switch to 64bit
  32. * long mode with a kernel mapping for kerneland to jump into the
  33. * kernel virtual addresses.
  34. * There is no stack until we set one up.
  35. */
  36. /* Initialize the %ds segment register */
  37. movl $__KERNEL_DS,%eax
  38. movl %eax,%ds
  39. /* Load new GDT with the 64bit segments using 32bit descriptor */
  40. lgdt pGDT32 - __START_KERNEL_map
  41. /* If the CPU doesn't support CPUID this will double fault.
  42. * Unfortunately it is hard to check for CPUID without a stack.
  43. */
  44. /* Check if extended functions are implemented */
  45. movl $0x80000000, %eax
  46. cpuid
  47. cmpl $0x80000000, %eax
  48. jbe no_long_mode
  49. /* Check if long mode is implemented */
  50. mov $0x80000001, %eax
  51. cpuid
  52. btl $29, %edx
  53. jnc no_long_mode
  54. /*
  55. * Prepare for entering 64bits mode
  56. */
  57. /* Enable PAE mode */
  58. xorl %eax, %eax
  59. btsl $5, %eax
  60. movl %eax, %cr4
  61. /* Setup early boot stage 4 level pagetables */
  62. movl $(boot_level4_pgt - __START_KERNEL_map), %eax
  63. movl %eax, %cr3
  64. /* Setup EFER (Extended Feature Enable Register) */
  65. movl $MSR_EFER, %ecx
  66. rdmsr
  67. /* Enable Long Mode */
  68. btsl $_EFER_LME, %eax
  69. /* Make changes effective */
  70. wrmsr
  71. xorl %eax, %eax
  72. btsl $31, %eax /* Enable paging and in turn activate Long Mode */
  73. btsl $0, %eax /* Enable protected mode */
  74. /* Make changes effective */
  75. movl %eax, %cr0
  76. /*
  77. * At this point we're in long mode but in 32bit compatibility mode
  78. * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
  79. * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
  80. * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
  81. */
  82. ljmp $__KERNEL_CS, $(startup_64 - __START_KERNEL_map)
  83. .code64
  84. .org 0x100
  85. .globl startup_64
  86. startup_64:
  87. /* We come here either from startup_32
  88. * or directly from a 64bit bootloader.
  89. * Since we may have come directly from a bootloader we
  90. * reload the page tables here.
  91. */
  92. /* Enable PAE mode and PGE */
  93. xorq %rax, %rax
  94. btsq $5, %rax
  95. btsq $7, %rax
  96. movq %rax, %cr4
  97. /* Setup early boot stage 4 level pagetables. */
  98. movq $(boot_level4_pgt - __START_KERNEL_map), %rax
  99. movq %rax, %cr3
  100. /* Check if nx is implemented */
  101. movl $0x80000001, %eax
  102. cpuid
  103. movl %edx,%edi
  104. /* Setup EFER (Extended Feature Enable Register) */
  105. movl $MSR_EFER, %ecx
  106. rdmsr
  107. /* Enable System Call */
  108. btsl $_EFER_SCE, %eax
  109. /* No Execute supported? */
  110. btl $20,%edi
  111. jnc 1f
  112. btsl $_EFER_NX, %eax
  113. 1:
  114. /* Make changes effective */
  115. wrmsr
  116. /* Setup cr0 */
  117. #define CR0_PM 1 /* protected mode */
  118. #define CR0_MP (1<<1)
  119. #define CR0_ET (1<<4)
  120. #define CR0_NE (1<<5)
  121. #define CR0_WP (1<<16)
  122. #define CR0_AM (1<<18)
  123. #define CR0_PAGING (1<<31)
  124. movl $CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
  125. /* Make changes effective */
  126. movq %rax, %cr0
  127. /* Setup a boot time stack */
  128. movq init_rsp(%rip),%rsp
  129. /* zero EFLAGS after setting rsp */
  130. pushq $0
  131. popfq
  132. /*
  133. * We must switch to a new descriptor in kernel space for the GDT
  134. * because soon the kernel won't have access anymore to the userspace
  135. * addresses where we're currently running on. We have to do that here
  136. * because in 32bit we couldn't load a 64bit linear address.
  137. */
  138. lgdt cpu_gdt_descr
  139. /*
  140. * Setup up a dummy PDA. this is just for some early bootup code
  141. * that does in_interrupt()
  142. */
  143. movl $MSR_GS_BASE,%ecx
  144. movq $empty_zero_page,%rax
  145. movq %rax,%rdx
  146. shrq $32,%rdx
  147. wrmsr
  148. /* set up data segments. actually 0 would do too */
  149. movl $__KERNEL_DS,%eax
  150. movl %eax,%ds
  151. movl %eax,%ss
  152. movl %eax,%es
  153. /* esi is pointer to real mode structure with interesting info.
  154. pass it to C */
  155. movl %esi, %edi
  156. /* Finally jump to run C code and to be on real kernel address
  157. * Since we are running on identity-mapped space we have to jump
  158. * to the full 64bit address , this is only possible as indirect
  159. * jump
  160. */
  161. movq initial_code(%rip),%rax
  162. jmp *%rax
  163. /* SMP bootup changes these two */
  164. .globl initial_code
  165. initial_code:
  166. .quad x86_64_start_kernel
  167. .globl init_rsp
  168. init_rsp:
  169. .quad init_thread_union+THREAD_SIZE-8
  170. ENTRY(early_idt_handler)
  171. cmpl $2,early_recursion_flag(%rip)
  172. jz 1f
  173. incl early_recursion_flag(%rip)
  174. xorl %eax,%eax
  175. movq 8(%rsp),%rsi # get rip
  176. movq (%rsp),%rdx
  177. movq %cr2,%rcx
  178. leaq early_idt_msg(%rip),%rdi
  179. call early_printk
  180. cmpl $2,early_recursion_flag(%rip)
  181. jz 1f
  182. call dump_stack
  183. 1: hlt
  184. jmp 1b
  185. early_recursion_flag:
  186. .long 0
  187. early_idt_msg:
  188. .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
  189. .code32
  190. ENTRY(no_long_mode)
  191. /* This isn't an x86-64 CPU so hang */
  192. 1:
  193. jmp 1b
  194. .org 0xf00
  195. .globl pGDT32
  196. pGDT32:
  197. .word gdt_end-cpu_gdt_table
  198. .long cpu_gdt_table-__START_KERNEL_map
  199. .org 0xf10
  200. ljumpvector:
  201. .long startup_64-__START_KERNEL_map
  202. .word __KERNEL_CS
  203. ENTRY(stext)
  204. ENTRY(_stext)
  205. .org 0x1000
  206. ENTRY(init_level4_pgt)
  207. /* This gets initialized in x86_64_start_kernel */
  208. .fill 512,8,0
  209. .org 0x2000
  210. ENTRY(level3_ident_pgt)
  211. .quad 0x0000000000004007 + __PHYSICAL_START
  212. .fill 511,8,0
  213. .org 0x3000
  214. ENTRY(level3_kernel_pgt)
  215. .fill 510,8,0
  216. /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
  217. .quad 0x0000000000005007 + __PHYSICAL_START /* -> level2_kernel_pgt */
  218. .fill 1,8,0
  219. .org 0x4000
  220. ENTRY(level2_ident_pgt)
  221. /* 40MB for bootup. */
  222. .quad 0x0000000000000083
  223. .quad 0x0000000000200083
  224. .quad 0x0000000000400083
  225. .quad 0x0000000000600083
  226. .quad 0x0000000000800083
  227. .quad 0x0000000000A00083
  228. .quad 0x0000000000C00083
  229. .quad 0x0000000000E00083
  230. .quad 0x0000000001000083
  231. .quad 0x0000000001200083
  232. .quad 0x0000000001400083
  233. .quad 0x0000000001600083
  234. .quad 0x0000000001800083
  235. .quad 0x0000000001A00083
  236. .quad 0x0000000001C00083
  237. .quad 0x0000000001E00083
  238. .quad 0x0000000002000083
  239. .quad 0x0000000002200083
  240. .quad 0x0000000002400083
  241. .quad 0x0000000002600083
  242. /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
  243. .globl temp_boot_pmds
  244. temp_boot_pmds:
  245. .fill 492,8,0
  246. .org 0x5000
  247. ENTRY(level2_kernel_pgt)
  248. /* 40MB kernel mapping. The kernel code cannot be bigger than that.
  249. When you change this change KERNEL_TEXT_SIZE in page.h too. */
  250. /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
  251. .quad 0x0000000000000183
  252. .quad 0x0000000000200183
  253. .quad 0x0000000000400183
  254. .quad 0x0000000000600183
  255. .quad 0x0000000000800183
  256. .quad 0x0000000000A00183
  257. .quad 0x0000000000C00183
  258. .quad 0x0000000000E00183
  259. .quad 0x0000000001000183
  260. .quad 0x0000000001200183
  261. .quad 0x0000000001400183
  262. .quad 0x0000000001600183
  263. .quad 0x0000000001800183
  264. .quad 0x0000000001A00183
  265. .quad 0x0000000001C00183
  266. .quad 0x0000000001E00183
  267. .quad 0x0000000002000183
  268. .quad 0x0000000002200183
  269. .quad 0x0000000002400183
  270. .quad 0x0000000002600183
  271. /* Module mapping starts here */
  272. .fill 492,8,0
  273. .org 0x6000
  274. ENTRY(empty_zero_page)
  275. .org 0x7000
  276. ENTRY(empty_bad_page)
  277. .org 0x8000
  278. ENTRY(empty_bad_pte_table)
  279. .org 0x9000
  280. ENTRY(empty_bad_pmd_table)
  281. .org 0xa000
  282. ENTRY(level3_physmem_pgt)
  283. .quad 0x0000000000005007 + __PHYSICAL_START /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
  284. .org 0xb000
  285. #ifdef CONFIG_ACPI_SLEEP
  286. ENTRY(wakeup_level4_pgt)
  287. .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
  288. .fill 255,8,0
  289. .quad 0x000000000000a007 + __PHYSICAL_START
  290. .fill 254,8,0
  291. /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
  292. .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
  293. #endif
  294. #ifndef CONFIG_HOTPLUG_CPU
  295. __INITDATA
  296. #endif
  297. /*
  298. * This default setting generates an ident mapping at address 0x100000
  299. * and a mapping for the kernel that precisely maps virtual address
  300. * 0xffffffff80000000 to physical address 0x000000. (always using
  301. * 2Mbyte large pages provided by PAE mode)
  302. */
  303. .align PAGE_SIZE
  304. ENTRY(boot_level4_pgt)
  305. .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
  306. .fill 255,8,0
  307. .quad 0x000000000000a007 + __PHYSICAL_START
  308. .fill 254,8,0
  309. /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
  310. .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
  311. .data
  312. .align 16
  313. .globl cpu_gdt_descr
  314. cpu_gdt_descr:
  315. .word gdt_end-cpu_gdt_table
  316. gdt:
  317. .quad cpu_gdt_table
  318. #ifdef CONFIG_SMP
  319. .rept NR_CPUS-1
  320. .word 0
  321. .quad 0
  322. .endr
  323. #endif
  324. /* We need valid kernel segments for data and code in long mode too
  325. * IRET will check the segment types kkeil 2000/10/28
  326. * Also sysret mandates a special GDT layout
  327. */
  328. .align PAGE_SIZE
  329. /* The TLS descriptors are currently at a different place compared to i386.
  330. Hopefully nobody expects them at a fixed place (Wine?) */
  331. ENTRY(cpu_gdt_table)
  332. .quad 0x0000000000000000 /* NULL descriptor */
  333. .quad 0x008f9a000000ffff /* __KERNEL_COMPAT32_CS */
  334. .quad 0x00af9a000000ffff /* __KERNEL_CS */
  335. .quad 0x00cf92000000ffff /* __KERNEL_DS */
  336. .quad 0x00cffa000000ffff /* __USER32_CS */
  337. .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
  338. .quad 0x00affa000000ffff /* __USER_CS */
  339. .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
  340. .quad 0,0 /* TSS */
  341. .quad 0,0 /* LDT */
  342. .quad 0,0,0 /* three TLS descriptors */
  343. .quad 0x00009a000000ffff /* __KERNEL16_CS - 16bit PM for S3 wakeup. */
  344. /* base must be patched for real base address. */
  345. gdt_end:
  346. /* asm/segment.h:GDT_ENTRIES must match this */
  347. /* This should be a multiple of the cache line size */
  348. /* GDTs of other CPUs are now dynamically allocated */
  349. /* zero the remaining page */
  350. .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
  351. ENTRY(idt_table)
  352. .rept 256
  353. .quad 0
  354. .quad 0
  355. .endr