wakeup.S

.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
# wakeup_code runs in real mode, at an unknown address determined at run time.
# Therefore it must use only relative jumps and calls.
#
# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled.
#
# If the physical address of wakeup_code is 0x12345, the BIOS should call us with
# cs = 0x1234, eip = 0x05
#
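# Worked example of that split: the real-mode physical address is cs * 16 + ip,
# so 0x1234 << 4 = 0x12340, plus ip = 0x05, gives back 0x12345.
#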
ALIGN
        .align  16
ENTRY(wakeup_start)
wakeup_code:
wakeup_code_start = .
        .code16

# Running in a *copy* of this code, somewhere in the low 1 MB.

        movb    $0xa1, %al      ;  outb %al, $0x80
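        # The outb writes to port 0x80 scattered through this file are POST-code
        # style progress markers; they can be watched on a port-0x80 debug card
        # to see how far the wakeup path got before a hang.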
        cli
        cld

        # set up the data segment
        movw    %cs, %ax
        movw    %ax, %ds                # Make ds:0 point to wakeup_start
        movw    %ax, %ss
        # A private stack is needed for the ASUS board
        mov     $(wakeup_stack - wakeup_code), %sp

        pushl   $0                      # Kill any dangerous flags
        popfl

        movl    real_magic - wakeup_code, %eax
        cmpl    $0x12345678, %eax
        jne     bogus_real_magic

        testl   $1, video_flags - wakeup_code
        jz      1f
        lcall   $0xc000, $3
        movw    %cs, %ax
        movw    %ax, %ds                # The BIOS might have played with that
        movw    %ax, %ss
1:

        testl   $2, video_flags - wakeup_code
        jz      1f
        mov     video_mode - wakeup_code, %ax
        call    mode_seta
1:

        movw    $0xb800, %ax
        movw    %ax, %fs
        movw    $0x0e00 + 'L', %fs:(0x10)
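        # Progress letters like the 'L' above go straight into VGA text memory
        # (segment 0xb800); 0x0e00 is the attribute byte (yellow on black).
        # Together with the later writes they spell out "Linux!" on screen as
        # the resume path advances.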
        movb    $0xa2, %al      ;  outb %al, $0x80

        lidt    %ds:idt_48a - wakeup_code
        xorl    %eax, %eax
        movw    %ds, %ax                # (Convert %ds:gdt to a linear ptr)
        shll    $4, %eax
        addl    $(gdta - wakeup_code), %eax
        movl    %eax, gdt_48a + 2 - wakeup_code
        lgdtl   %ds:gdt_48a - wakeup_code       # load gdt with whatever is appropriate
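        # In real mode a linear address is segment * 16 + offset, which is what
        # the shll $4 / addl pair computes for the gdta table.  The result is
        # stored into the 32-bit base field of gdt_48a (two bytes past its
        # start, after the 16-bit limit), forming the limit:base
        # pseudo-descriptor that lgdtl expects.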
        movl    $1, %eax                # protected mode (PE) bit
        lmsw    %ax                     # This is it!
        jmp     1f
1:

        ljmpl   *(wakeup_32_vector - wakeup_code)

        .balign 4
wakeup_32_vector:
        .long   wakeup_32 - __START_KERNEL_map
        .word   __KERNEL32_CS, 0
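        # wakeup_32_vector is the 6-byte far pointer consumed by the indirect
        # ljmpl above: a 32-bit offset followed by a 16-bit selector (plus a
        # padding word).  Loading CS with __KERNEL32_CS completes the switch
        # into protected mode that lmsw started by setting CR0.PE.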
        .code32
wakeup_32:
# Running in this code, but at a low address; paging is not yet turned on.
        movb    $0xa5, %al      ;  outb %al, $0x80

        /* Check whether extended CPUID functions are implemented */
        movl    $0x80000000, %eax
        cpuid
        cmpl    $0x80000000, %eax
        jbe     bogus_cpu
        wbinvd
        mov     $0x80000001, %eax
        cpuid
        btl     $29, %edx
        jnc     bogus_cpu
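        /*
         * CPUID leaf 0x80000001 returns the extended feature flags in %edx:
         * bit 29 is LM (long mode supported), bit 20 is NX.  LM was tested
         * above; the flags are kept in %edi so NX can be checked once EFER
         * is programmed below.
         */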
        movl    %edx, %edi
        movl    $__KERNEL_DS, %eax
        movl    %eax, %ds

        movl    saved_magic - __START_KERNEL_map, %eax
        cmpl    $0x9abcdef0, %eax
        jne     bogus_32_magic

        movw    $0x0e00 + 'i', %ds:(0xb8012)
        movb    $0xa8, %al      ;  outb %al, $0x80

        /*
         * Prepare for entering 64-bit mode
         */

        /* Enable PAE */
        xorl    %eax, %eax
        btsl    $5, %eax
        movl    %eax, %cr4

        /* Set up the early boot 4-level page tables */
        movl    $(wakeup_level4_pgt - __START_KERNEL_map), %eax
        movl    %eax, %cr3

        /* Enable Long Mode */
        xorl    %eax, %eax
        btsl    $_EFER_LME, %eax

        /* No Execute supported? */
        btl     $20, %edi
        jnc     1f
        btsl    $_EFER_NX, %eax

        /* Make changes effective */
1:      movl    $MSR_EFER, %ecx
        xorl    %edx, %edx
        wrmsr
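        /*
         * wrmsr writes %edx:%eax into the MSR selected by %ecx, so %edx is
         * cleared above and only the low 32 bits (LME, and NX when supported)
         * land in EFER.
         */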
        xorl    %eax, %eax
        btsl    $31, %eax               /* Enable paging and in turn activate Long Mode */
        btsl    $0, %eax                /* Enable protected mode */

        /* Make changes effective */
        movl    %eax, %cr0
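        /* %eax is now 0x80000001: CR0.PG (bit 31) | CR0.PE (bit 0). */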
        /* At this point:
                CR4.PAE must be 1
                CS.L must be 0
                CR3 must point to the PML4
                The next instruction must be a branch
                This must be on an identity-mapped page
        */
        /*
         * At this point we're in long mode but in 32-bit compatibility mode
         * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
         * EFER.LMA = 1).  Now we want to jump into 64-bit mode; to do that we
         * load the new gdt/idt whose __KERNEL_CS has CS.L = 1.
         */

        /* Finally jump into 64-bit mode */
        ljmp    *(wakeup_long64_vector - __START_KERNEL_map)

        .balign 4
wakeup_long64_vector:
        .long   wakeup_long64 - __START_KERNEL_map
        .word   __KERNEL_CS, 0

.code64

        /* Hooray, we are in Long 64-bit mode (but still running in
         * low memory)
         */
wakeup_long64:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
         * addresses we are currently running at.  We have to do that here
         * because in 32-bit mode we couldn't load a 64-bit linear address.
         */
        lgdt    cpu_gdt_descr - __START_KERNEL_map
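        /*
         * In 64-bit mode lgdt reads a 10-byte pseudo-descriptor: a 16-bit
         * limit followed by a 64-bit base.  That is why this load is done
         * from 64-bit code rather than from the 32-bit code above, which
         * could only have loaded a 32-bit GDT base.
         */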
        movw    $0x0e00 + 'n', %ds:(0xb8014)
        movb    $0xa9, %al      ;  outb %al, $0x80

        movw    $0x0e00 + 'u', %ds:(0xb8016)
        nop
        nop
        movw    $__KERNEL_DS, %ax
        movw    %ax, %ss
        movw    %ax, %ds
        movw    %ax, %es
        movw    %ax, %fs
        movw    %ax, %gs
        movq    saved_rsp, %rsp

        movw    $0x0e00 + 'x', %ds:(0xb8018)
        movq    saved_rbx, %rbx
        movq    saved_rdi, %rdi
        movq    saved_rsi, %rsi
        movq    saved_rbp, %rbp

        movw    $0x0e00 + '!', %ds:(0xb801a)
        movq    saved_rip, %rax
        jmp     *%rax

.code32

        .align  64
gdta:
        /* It is good to keep this gdt in sync with the one in trampoline.S */
        .word   0, 0, 0, 0                      # dummy
        /* ??? Why do I need the accessed bit set for this to work? */
        .quad   0x00cf9b000000ffff              # __KERNEL32_CS
        .quad   0x00af9b000000ffff              # __KERNEL_CS
        .quad   0x00cf93000000ffff              # __KERNEL_DS
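        # Decoding the descriptor quads above: base 0, limit 0xfffff with 4 KB
        # granularity (so the full 4 GB), present, DPL 0, accessed.  The 0xc
        # flags nibble (in 0xcf) marks 32-bit code/data segments, while the
        # 0xa nibble in __KERNEL_CS's 0xaf sets the L bit for 64-bit code
        # instead.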
idt_48a:
        .word   0                               # idt limit = 0
        .word   0, 0                            # idt base = 0L

gdt_48a:
        .word   0x800                           # gdt limit = 2048,
                                                # 256 GDT entries
        .word   0, 0                            # gdt base (filled in later)

real_magic:     .quad 0
video_mode:     .quad 0
video_flags:    .quad 0

bogus_real_magic:
        movb    $0xba, %al      ;  outb %al, $0x80
        jmp     bogus_real_magic

bogus_32_magic:
        movb    $0xb3, %al      ;  outb %al, $0x80
        jmp     bogus_32_magic

bogus_cpu:
        movb    $0xbc, %al      ;  outb %al, $0x80
        jmp     bogus_cpu

/*
 * This code uses an extended set of video mode numbers.  These include:
 * Aliases for standard modes
 *      NORMAL_VGA (-1)
 *      EXTENDED_VGA (-2)
 *      ASK_VGA (-3)
 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
 * of compatibility when extending the table.  These are between 0x00 and 0xff.
 */
#define VIDEO_FIRST_MENU 0x0000

/* Standard BIOS video modes (BIOS number + 0x0100) */
#define VIDEO_FIRST_BIOS 0x0100

/* VESA BIOS video modes (VESA number + 0x0200) */
#define VIDEO_FIRST_VESA 0x0200

/* Video7 special modes (BIOS number + 0x0900) */
#define VIDEO_FIRST_V7 0x0900
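# For example, VESA mode 0x101 (640x480, 256 colours) would be passed in as
# 0x0301; mode_seta below strips the 0x02 prefix again and sets bit 14 so the
# VESA BIOS call requests the linear-frame-buffer variant of the mode.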
# Set the video mode (AX = mode ID); CF is set on success
mode_seta:
        movw    %ax, %bx
#if 0
        cmpb    $0xff, %ah
        jz      setalias

        testb   $VIDEO_RECALC>>8, %ah
        jnz     _setrec

        cmpb    $VIDEO_FIRST_RESOLUTION>>8, %ah
        jnc     setres

        cmpb    $VIDEO_FIRST_SPECIAL>>8, %ah
        jz      setspc

        cmpb    $VIDEO_FIRST_V7>>8, %ah
        jz      setv7
#endif

        cmpb    $VIDEO_FIRST_VESA>>8, %ah
        jnc     check_vesaa
#if 0
        orb     %ah, %ah
        jz      setmenu
#endif

        decb    %ah
#       jz      setbios                         Add bios modes later

setbada:
        clc
        ret

check_vesaa:
        subb    $VIDEO_FIRST_VESA>>8, %bh
        orw     $0x4000, %bx                    # Use linear frame buffer
        movw    $0x4f02, %ax                    # VESA BIOS mode set call
        int     $0x10
        cmpw    $0x004f, %ax                    # AL=0x4f if the call is implemented
        jnz     _setbada                        # AH=0 if the mode set succeeded
        stc
        ret

_setbada:
        jmp     setbada

wakeup_stack_begin:     # Stack grows down

.org    0xff0
wakeup_stack:           # Just below end of page

ENTRY(wakeup_end)

##
# acpi_copy_wakeup_routine
#
# Copy the above routine to low memory.
#
# Parameters:
# %rdi: place to copy wakeup routine to
#
# Returned address is location of code in low memory (past data and stack)
#
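# A rough sketch of how a C caller might drive this (illustrative only: the
# low_mem variable and the memcpy step are assumptions about the caller, not
# something defined in this file):
#
#       extern char wakeup_start[], wakeup_end[];
#
#       memcpy((void *)low_mem, wakeup_start, wakeup_end - wakeup_start);
#       acpi_copy_wakeup_routine(low_mem);  /* patch magic/video words in the copy */
#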
.code64
ENTRY(acpi_copy_wakeup_routine)
        pushq   %rax
        pushq   %rdx

        movl    saved_video_mode, %edx
        movl    %edx, video_mode - wakeup_start (,%rdi)
        movl    acpi_video_flags, %edx
        movl    %edx, video_flags - wakeup_start (,%rdi)
        movq    $0x12345678, real_magic - wakeup_start (,%rdi)
        movq    $0x123456789abcdef0, %rdx
        movq    %rdx, saved_magic

        movl    saved_magic - __START_KERNEL_map, %eax
        cmpl    $0x9abcdef0, %eax
        jne     bogus_32_magic

        # restore the regs we used
        popq    %rdx
        popq    %rax
ENTRY(do_suspend_lowlevel_s4bios)
        ret

        .align  2
        .p2align 4,,15
.globl do_suspend_lowlevel
        .type   do_suspend_lowlevel, @function
do_suspend_lowlevel:
.LFB5:
        subq    $8, %rsp
        xorl    %eax, %eax
        call    save_processor_state

        movq    %rsp, saved_context_esp(%rip)
        movq    %rax, saved_context_eax(%rip)
        movq    %rbx, saved_context_ebx(%rip)
        movq    %rcx, saved_context_ecx(%rip)
        movq    %rdx, saved_context_edx(%rip)
        movq    %rbp, saved_context_ebp(%rip)
        movq    %rsi, saved_context_esi(%rip)
        movq    %rdi, saved_context_edi(%rip)
        movq    %r8, saved_context_r08(%rip)
        movq    %r9, saved_context_r09(%rip)
        movq    %r10, saved_context_r10(%rip)
        movq    %r11, saved_context_r11(%rip)
        movq    %r12, saved_context_r12(%rip)
        movq    %r13, saved_context_r13(%rip)
        movq    %r14, saved_context_r14(%rip)
        movq    %r15, saved_context_r15(%rip)
        pushfq  ;  popq saved_context_eflags(%rip)

        movq    $.L97, saved_rip(%rip)

        movq    %rsp, saved_rsp
        movq    %rbp, saved_rbp
        movq    %rbx, saved_rbx
        movq    %rdi, saved_rdi
        movq    %rsi, saved_rsi

        addq    $8, %rsp
        movl    $3, %edi
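        # 3 here is the ACPI sleep state: acpi_enter_sleep_state() takes the
        # state number as its first argument (%rdi in the x86-64 ABI), so this
        # requests S3 (suspend to RAM).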
        xorl    %eax, %eax
        jmp     acpi_enter_sleep_state
.L97:
        .p2align 4,,7
.L99:
        .align  4
        movl    $24, %eax
        movw    %ax, %ds
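        # 24 (0x18) is the __KERNEL_DS selector (GDT index 3, matching the
        # gdta layout above).  The saved_context+NN loads below read the saved
        # cr4/cr3/cr2/cr0 values back out of struct saved_context at
        # hard-coded byte offsets.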
        movq    saved_context+58(%rip), %rax
        movq    %rax, %cr4
        movq    saved_context+50(%rip), %rax
        movq    %rax, %cr3
        movq    saved_context+42(%rip), %rax
        movq    %rax, %cr2
        movq    saved_context+34(%rip), %rax
        movq    %rax, %cr0
        pushq   saved_context_eflags(%rip) ;  popfq
        movq    saved_context_esp(%rip), %rsp
        movq    saved_context_ebp(%rip), %rbp
        movq    saved_context_eax(%rip), %rax
        movq    saved_context_ebx(%rip), %rbx
        movq    saved_context_ecx(%rip), %rcx
        movq    saved_context_edx(%rip), %rdx
        movq    saved_context_esi(%rip), %rsi
        movq    saved_context_edi(%rip), %rdi
        movq    saved_context_r08(%rip), %r8
        movq    saved_context_r09(%rip), %r9
        movq    saved_context_r10(%rip), %r10
        movq    saved_context_r11(%rip), %r11
        movq    saved_context_r12(%rip), %r12
        movq    saved_context_r13(%rip), %r13
        movq    saved_context_r14(%rip), %r14
        movq    saved_context_r15(%rip), %r15

        xorl    %eax, %eax
        addq    $8, %rsp
        jmp     restore_processor_state
.LFE5:
.Lfe5:
        .size   do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel

        .data
ALIGN
ENTRY(saved_rbp)        .quad   0
ENTRY(saved_rsi)        .quad   0
ENTRY(saved_rdi)        .quad   0
ENTRY(saved_rbx)        .quad   0

ENTRY(saved_rip)        .quad   0
ENTRY(saved_rsp)        .quad   0

ENTRY(saved_magic)      .quad   0