.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
# wakeup_code runs in real mode, and at an unknown address (determined at run-time).
# Therefore it must only use relative jumps/calls.
#
# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled.
#
# If the physical address of wakeup_code is 0x12345, the BIOS should call us with
# cs = 0x1234, eip = 0x05
#
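# The BEEP macro below drives the PC speaker through the 8254 PIT: writing 3
# to port 0x61 gates the speaker onto PIT channel 2, 0xb6 (-74) to port 0x43
# selects channel 2, mode 3 (square wave), lo/hi byte access, and the two
# writes to port 0x42 load the divisor 0x0f89 (roughly 300 Hz from the
# 1.193182 MHz PIT clock). The writes to port 0x80 are just short I/O delays.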
#define BEEP \
        inb  $97, %al;          \
        outb %al, $0x80;        \
        movb $3, %al;           \
        outb %al, $97;          \
        outb %al, $0x80;        \
        movb $-74, %al;         \
        outb %al, $67;          \
        outb %al, $0x80;        \
        movb $-119, %al;        \
        outb %al, $66;          \
        outb %al, $0x80;        \
        movb $15, %al;          \
        outb %al, $66;
ALIGN
        .align 16
ENTRY(wakeup_start)
wakeup_code:
        wakeup_code_start = .
        .code16

# Running in *copy* of this code, somewhere in low 1MB.

        movb $0xa1, %al ; outb %al, $0x80
        cli
        cld

        # setup data segment
        movw %cs, %ax
        movw %ax, %ds           # Make ds:0 point to wakeup_start
        movw %ax, %ss

        # Data segment must be set up before we can see whether to beep.
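        # realmode_flags is a copy of acpi_realmode_flags, filled into the
        # low-memory copy by acpi_copy_wakeup_routine below (normally set via
        # the acpi_sleep= boot option): bit 2 (4) = beep on wakeup, bit 0 (1)
        # = call the video BIOS POST entry at c000:0003, bit 1 (2) = restore
        # the video mode via the VESA BIOS.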
        testl $4, realmode_flags - wakeup_code
        jz 1f
        BEEP
1:

        # Private stack is needed for ASUS board
        mov $(wakeup_stack - wakeup_code), %sp

        pushl $0                # Kill any dangerous flags
        popfl

        movl real_magic - wakeup_code, %eax
        cmpl $0x12345678, %eax
        jne bogus_real_magic

        call verify_cpu         # Verify the cpu supports long mode
        testl %eax, %eax
        jnz no_longmode

        testl $1, realmode_flags - wakeup_code
        jz 1f
        lcall $0xc000,$3
        movw %cs, %ax
        movw %ax, %ds           # BIOS might have played with that
        movw %ax, %ss
1:

        testl $2, realmode_flags - wakeup_code
        jz 1f
        mov video_mode - wakeup_code, %ax
        call mode_set
1:

        movw $0xb800, %ax
        movw %ax, %fs
        movw $0x0e00 + 'L', %fs:(0x10)

        movb $0xa2, %al ; outb %al, $0x80

        mov %ds, %ax            # Find 32bit wakeup_code addr
        movzx %ax, %esi         # (Convert %ds:gdt to a linear ptr)
        shll $4, %esi
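        # The far-pointer vectors and the GDT base below were assembled as
        # offsets from wakeup_code; since this code runs from a copy at an
        # arbitrary low address, add the linear base in %esi so they become
        # real physical addresses before they are used by lgdt/ljmp.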
        # Fix up the vectors
        addl %esi, wakeup_32_vector - wakeup_code
        addl %esi, wakeup_long64_vector - wakeup_code
        addl %esi, gdt_48a + 2 - wakeup_code    # Fixup the gdt pointer

        lidtl %ds:idt_48a - wakeup_code
        lgdtl %ds:gdt_48a - wakeup_code         # load gdt with whatever is appropriate

        movl $1, %eax                   # protected mode (PE) bit
        lmsw %ax                        # This is it!
        jmp 1f                          # flush the prefetch queue
1:

        ljmpl *(wakeup_32_vector - wakeup_code)
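        # wakeup_32_vector is a 16:32 far pointer (32-bit offset followed by a
        # 16-bit selector) consumed by the indirect ljmpl above; its offset
        # field was rebased to a physical address by the fixup code earlier.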
        .balign 4
wakeup_32_vector:
        .long wakeup_32 - wakeup_code
        .word __KERNEL32_CS, 0
        .code32
wakeup_32:
# Running in this code, but at low address; paging is not yet turned on.
        movb $0xa5, %al ; outb %al, $0x80

        movl $__KERNEL_DS, %eax
        movl %eax, %ds

        movw $0x0e00 + 'i', %ds:(0xb8012)
        movb $0xa8, %al ; outb %al, $0x80

        /*
         * Prepare for entering 64-bit mode
         */

        /* Enable PAE */
        xorl %eax, %eax
        btsl $5, %eax
        movl %eax, %cr4

        /* Set up the early boot 4-level pagetables */
        leal (wakeup_level4_pgt - wakeup_code)(%esi), %eax
        movl %eax, %cr3

        /* Check if NX is implemented */
        movl $0x80000001, %eax
        cpuid
        movl %edx, %edi

        /* Enable Long Mode */
        xorl %eax, %eax
        btsl $_EFER_LME, %eax

        /* No Execute supported? */
        btl $20, %edi
        jnc 1f
        btsl $_EFER_NX, %eax

        /* Make changes effective */
1:      movl $MSR_EFER, %ecx
        xorl %edx, %edx
        wrmsr

        xorl %eax, %eax
        btsl $31, %eax          /* Enable paging and in turn activate Long Mode */
        btsl $0, %eax           /* Enable protected mode */

        /* Make changes effective */
        movl %eax, %cr0

        /* At this point:
                CR4.PAE must be 1
                CS.L must be 0
                CR3 must point to the PML4
                Next instruction must be a branch
                This must be on an identity-mapped page
        */
        /*
         * At this point we're in long mode but in 32-bit compatibility mode
         * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
         * EFER.LMA = 1). Now we want to jump into 64-bit mode; to do that we
         * load the new gdt/idt that has __KERNEL_CS with CS.L = 1.
         */

        /* Finally jump into 64-bit mode */
        ljmp *(wakeup_long64_vector - wakeup_code)(%esi)
        .balign 4
wakeup_long64_vector:
        .long wakeup_long64 - wakeup_code
        .word __KERNEL_CS, 0

        .code64

        /* Hooray, we are in long 64-bit mode (but still running in
         * low memory)
         */
wakeup_long64:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
         * addresses where we're currently running. We have to do that here
         * because in 32-bit mode we couldn't load a 64-bit linear address.
         */
        lgdt cpu_gdt_descr

        movw $0x0e00 + 'n', %ds:(0xb8014)
        movb $0xa9, %al ; outb %al, $0x80

        movq saved_magic, %rax
        movq $0x123456789abcdef0, %rdx
        cmpq %rdx, %rax
        jne bogus_64_magic

        movw $0x0e00 + 'u', %ds:(0xb8016)

        nop
        nop
        movw $__KERNEL_DS, %ax
        movw %ax, %ss
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movq saved_rsp, %rsp

        movw $0x0e00 + 'x', %ds:(0xb8018)
        movq saved_rbx, %rbx
        movq saved_rdi, %rdi
        movq saved_rsi, %rsi
        movq saved_rbp, %rbp

        movw $0x0e00 + '!', %ds:(0xb801a)
        movq saved_rip, %rax
        jmp *%rax
        .code32

        .align 64
gdta:
        /* It's good to keep the gdt here in sync with the one in trampoline.S */
        .word 0, 0, 0, 0                # dummy

        /* ??? Why do I need the accessed bit set in order for this to work? */
        .quad 0x00cf9b000000ffff        # __KERNEL32_CS
        .quad 0x00af9b000000ffff        # __KERNEL_CS
        .quad 0x00cf93000000ffff        # __KERNEL_DS
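        # The three descriptors above decode as: a flat 32-bit code segment
        # (G=1, D=1), a 64-bit code segment (G=1, L=1), and a flat read/write
        # data segment, all with base 0, limit 0xfffff pages, and the accessed
        # bit preset.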
idt_48a:
        .word 0                         # idt limit = 0
        .word 0, 0                      # idt base = 0L

gdt_48a:
        .word 0x800                     # gdt limit = 2048,
                                        #  256 GDT entries
        .long gdta - wakeup_code        # gdt base (relocated later)

real_magic:     .quad 0
video_mode:     .quad 0
realmode_flags: .quad 0
.code16
bogus_real_magic:
        movb $0xba, %al ; outb %al, $0x80
        jmp bogus_real_magic

.code64
bogus_64_magic:
        movb $0xb3, %al ; outb %al, $0x80
        jmp bogus_64_magic

.code16
no_longmode:
        movb $0xbc, %al ; outb %al, $0x80
        jmp no_longmode

#include "../verify_cpu_64.S"
/* This code uses an extended set of video mode numbers. These include:
 * Aliases for standard modes
 *      NORMAL_VGA (-1)
 *      EXTENDED_VGA (-2)
 *      ASK_VGA (-3)
 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
 * of compatibility when extending the table. These are between 0x00 and 0xff.
 */
#define VIDEO_FIRST_MENU 0x0000

/* Standard BIOS video modes (BIOS number + 0x0100) */
#define VIDEO_FIRST_BIOS 0x0100

/* VESA BIOS video modes (VESA number + 0x0200) */
#define VIDEO_FIRST_VESA 0x0200

/* Video7 special modes (BIOS number + 0x0900) */
#define VIDEO_FIRST_V7 0x0900

# Setting of user mode (AX=mode ID) => CF=success
# For now, we only handle VESA modes (0x0200..0x03ff). To handle other
# modes, we should probably compile in the video code from the boot
# directory.
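# For example, extended mode number 0x0303 is VESA mode 0x103 (800x600, 256
# colors): mode_set subtracts VIDEO_FIRST_VESA>>8 from %bh to recover 0x0103,
# ORs in bit 14 to request the linear frame buffer, and issues int 0x10 with
# AX=0x4f02, BX=0x4103.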
.code16
mode_set:
        movw %ax, %bx
        subb $VIDEO_FIRST_VESA>>8, %bh
        cmpb $2, %bh
        jb check_vesa

setbad:
        clc
        ret

check_vesa:
        orw $0x4000, %bx                # Use linear frame buffer
        movw $0x4f02, %ax               # VESA BIOS mode set call
        int $0x10
        cmpw $0x004f, %ax               # AL=4f if implemented
        jnz setbad                      # AH=0 if OK

        stc
        ret
wakeup_stack_begin:                     # Stack grows down

.org 0xff0
wakeup_stack:                           # Just below end of page

.org 0x1000
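# wakeup_level4_pgt is the temporary PML4 used the moment paging is switched
# on above: entry 0 identity-maps low memory through level3_ident_pgt (needed
# because this copied code still runs at its low physical address), and entry
# 511 maps the kernel at __START_KERNEL_map through level3_kernel_pgt.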
ENTRY(wakeup_level4_pgt)
        .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .fill 510, 8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE

ENTRY(wakeup_end)
##
# acpi_copy_wakeup_routine
#
# Copy the above routine to low memory.
#
# Parameters:
# %rdi: place to copy wakeup routine to
#
# Returned address is location of code in low memory (past data and stack)
#
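# Two magic values guard the wakeup path: real_magic (0x12345678), stored into
# the low-memory copy here and checked by the 16-bit code before it trusts the
# copy, and saved_magic (0x123456789abcdef0), stored in the kernel image and
# re-checked both below and by wakeup_long64 after the switch to 64-bit mode.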
        .code64
ENTRY(acpi_copy_wakeup_routine)
        pushq %rax
        pushq %rdx

        movl saved_video_mode, %edx
        movl %edx, video_mode - wakeup_start (,%rdi)
        movl acpi_realmode_flags, %edx
        movl %edx, realmode_flags - wakeup_start (,%rdi)
        movq $0x12345678, real_magic - wakeup_start (,%rdi)
        movq $0x123456789abcdef0, %rdx
        movq %rdx, saved_magic

        movq saved_magic, %rax
        movq $0x123456789abcdef0, %rdx
        cmpq %rdx, %rax
        jne bogus_64_magic

        # restore the regs we used
        popq %rdx
        popq %rax
ENTRY(do_suspend_lowlevel_s4bios)
        ret
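# do_suspend_lowlevel saves the processor state and the general-purpose
# registers, records .L97 in saved_rip, and tail-calls
# acpi_enter_sleep_state(3) to enter S3. On wakeup, the real-mode code above
# eventually jumps to saved_rip, so execution resumes at .L97, where the
# control registers and all saved registers are restored before jumping to
# restore_processor_state.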
.align 2
.p2align 4,,15
.globl do_suspend_lowlevel
        .type do_suspend_lowlevel, @function
do_suspend_lowlevel:
.LFB5:
        subq $8, %rsp
        xorl %eax, %eax
        call save_processor_state

        movq %rsp, saved_context_esp(%rip)
        movq %rax, saved_context_eax(%rip)
        movq %rbx, saved_context_ebx(%rip)
        movq %rcx, saved_context_ecx(%rip)
        movq %rdx, saved_context_edx(%rip)
        movq %rbp, saved_context_ebp(%rip)
        movq %rsi, saved_context_esi(%rip)
        movq %rdi, saved_context_edi(%rip)
        movq %r8, saved_context_r08(%rip)
        movq %r9, saved_context_r09(%rip)
        movq %r10, saved_context_r10(%rip)
        movq %r11, saved_context_r11(%rip)
        movq %r12, saved_context_r12(%rip)
        movq %r13, saved_context_r13(%rip)
        movq %r14, saved_context_r14(%rip)
        movq %r15, saved_context_r15(%rip)
        pushfq ; popq saved_context_eflags(%rip)

        movq $.L97, saved_rip(%rip)

        movq %rsp, saved_rsp
        movq %rbp, saved_rbp
        movq %rbx, saved_rbx
        movq %rdi, saved_rdi
        movq %rsi, saved_rsi

        addq $8, %rsp
        movl $3, %edi                   # sleep state S3
        xorl %eax, %eax
        jmp acpi_enter_sleep_state
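# Resume path: the wakeup code jumps here (via saved_rip) in 64-bit mode with
# the segment registers already reloaded; the hard-coded offsets into
# saved_context below pick out the cr4/cr3/cr2/cr0 fields saved by
# save_processor_state.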
.L97:
        .p2align 4,,7
.L99:
        .align 4
        movl $24, %eax                  # 24 = __KERNEL_DS
        movw %ax, %ds

        movq saved_context+58(%rip), %rax       # saved cr4
        movq %rax, %cr4
        movq saved_context+50(%rip), %rax       # saved cr3
        movq %rax, %cr3
        movq saved_context+42(%rip), %rax       # saved cr2
        movq %rax, %cr2
        movq saved_context+34(%rip), %rax       # saved cr0
        movq %rax, %cr0

        pushq saved_context_eflags(%rip) ; popfq
        movq saved_context_esp(%rip), %rsp
        movq saved_context_ebp(%rip), %rbp
        movq saved_context_eax(%rip), %rax
        movq saved_context_ebx(%rip), %rbx
        movq saved_context_ecx(%rip), %rcx
        movq saved_context_edx(%rip), %rdx
        movq saved_context_esi(%rip), %rsi
        movq saved_context_edi(%rip), %rdi
        movq saved_context_r08(%rip), %r8
        movq saved_context_r09(%rip), %r9
        movq saved_context_r10(%rip), %r10
        movq saved_context_r11(%rip), %r11
        movq saved_context_r12(%rip), %r12
        movq saved_context_r13(%rip), %r13
        movq saved_context_r14(%rip), %r14
        movq saved_context_r15(%rip), %r15

        xorl %eax, %eax
        addq $8, %rsp
        jmp restore_processor_state
.LFE5:
.Lfe5:
        .size do_suspend_lowlevel, .Lfe5 - do_suspend_lowlevel
.data
ALIGN
ENTRY(saved_rbp)        .quad 0
ENTRY(saved_rsi)        .quad 0
ENTRY(saved_rdi)        .quad 0
ENTRY(saved_rbx)        .quad 0

ENTRY(saved_rip)        .quad 0
ENTRY(saved_rsp)        .quad 0

ENTRY(saved_magic)      .quad 0