.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
# wakeup_code runs in real mode at an unknown address (determined at run time).
# Therefore it must use only relative jumps/calls.
#
# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled.
#
# If the physical address of wakeup_code is 0x12345, the BIOS should call us with
# cs = 0x1234, eip = 0x05
#
ALIGN
        .align 16
ENTRY(wakeup_start)
wakeup_code:
wakeup_code_start = .
        .code16
# Running in a *copy* of this code, somewhere in the low 1 MB.
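# (The "outb %al, $0x80" writes throughout this file are POST-code style
#  progress markers: each stage emits a distinct byte to I/O port 0x80 so a
#  POST card or port-0x80 debug reader shows how far the wakeup path got.)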
        movb $0xa1, %al ; outb %al, $0x80
        cli
        cld

        # set up the data segment
        movw %cs, %ax
        movw %ax, %ds                   # Make ds:0 point to wakeup_start
        movw %ax, %ss
        mov $(wakeup_stack - wakeup_code), %sp  # A private stack is needed for the ASUS board

        pushl $0                        # Kill any dangerous flags
        popfl

        movl real_magic - wakeup_code, %eax
        cmpl $0x12345678, %eax
        jne bogus_real_magic

        testl $1, video_flags - wakeup_code
        jz 1f
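        # (The lcall below far-calls the video BIOS ROM entry point at
        #  C000:0003 to re-initialize the video adapter; the BIOS call may
        #  clobber our segment registers, so they are reloaded just after.)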
        lcall $0xc000,$3
        movw %cs, %ax
        movw %ax, %ds                   # The BIOS may have changed these
        movw %ax, %ss
1:
        testl $2, video_flags - wakeup_code
        jz 1f
        mov video_mode - wakeup_code, %ax
        call mode_seta
1:
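        # (0xB800:0x0010 lies in the VGA text-mode frame buffer; storing
        #  0x0E00 + 'L' prints a yellow-on-black 'L' on screen as a visible
        #  progress marker, complementing the port 0x80 writes.)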
        movw $0xb800, %ax
        movw %ax, %fs
        movw $0x0e00 + 'L', %fs:(0x10)

        movb $0xa2, %al ; outb %al, $0x80

        lidt %ds:idt_48a - wakeup_code
        xorl %eax, %eax
        movw %ds, %ax                   # (Convert %ds:gdt to a linear ptr)
        shll $4, %eax
        addl $(gdta - wakeup_code), %eax
        movl %eax, gdt_48a + 2 - wakeup_code
        lgdtl %ds:gdt_48a - wakeup_code # load gdt with whatever is appropriate

        movl $1, %eax                   # protected mode (PE) bit
        lmsw %ax                        # This is it!
        jmp 1f
1:
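        # (Hand-assembled far jump: 0x66 is the operand-size prefix and 0xEA
        #  the far-jmp opcode, so the bytes below form a 32-bit far jump that
        #  loads __KERNEL_CS and lands in the 32-bit code at wakeup_32's
        #  low/physical address.)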
        .byte 0x66, 0xea                # prefix + jmpi-opcode
        .long wakeup_32 - __START_KERNEL_map
        .word __KERNEL_CS

        .code32
wakeup_32:
# Running in this code, but at a low address; paging is not yet turned on.
        movb $0xa5, %al ; outb %al, $0x80

        /* Check if extended CPUID functions are implemented */
        movl $0x80000000, %eax
        cpuid
        cmpl $0x80000000, %eax
        jbe bogus_cpu
        wbinvd
        mov $0x80000001, %eax
        cpuid
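        /* (CPUID 0x80000001: EDX bit 29 is the long-mode (LM) flag; without
         *  it the CPU cannot re-enter 64-bit mode.  EDX is also stashed in
         *  EDI so the NX bit (bit 20) can be checked later when EFER is
         *  programmed.) */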
        btl $29, %edx
        jnc bogus_cpu
        movl %edx, %edi

        movw $__KERNEL_DS, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs

        movw $__KERNEL_DS, %ax
        movw %ax, %ss

        mov $(wakeup_stack - __START_KERNEL_map), %esp
        movl saved_magic - __START_KERNEL_map, %eax
        cmpl $0x9abcdef0, %eax
        jne bogus_32_magic

        /*
         * Prepare to enter 64-bit mode
         */

        /* Enable PAE mode and PGE */
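        /* (CR4 bit 5 = PAE, bit 7 = PGE) */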
        xorl %eax, %eax
        btsl $5, %eax
        btsl $7, %eax
        movl %eax, %cr4

        /* Set up the early-boot 4-level page tables */
        movl $(wakeup_level4_pgt - __START_KERNEL_map), %eax
        movl %eax, %cr3

        /* Set up EFER (Extended Feature Enable Register) */
        movl $MSR_EFER, %ecx
        rdmsr
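        /* (The _EFER_LME/_EFER_SCE/_EFER_NX constants from <asm/msr.h> are
         *  bit numbers, not masks, which is why btsl is used below.) */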
        /* Fool rdmsr and reset %eax to avoid dependencies */
        xorl %eax, %eax
        /* Enable Long Mode */
        btsl $_EFER_LME, %eax
        /* Enable System Call */
        btsl $_EFER_SCE, %eax
        /* No Execute supported? */
        btl $20, %edi
        jnc 1f
        btsl $_EFER_NX, %eax
1:
        /* Make changes effective */
        wrmsr
        wbinvd

        xorl %eax, %eax
        btsl $31, %eax                  /* Enable paging and in turn activate Long Mode */
        btsl $0, %eax                   /* Enable protected mode */
        btsl $1, %eax                   /* Enable MP */
        btsl $4, %eax                   /* Enable ET */
        btsl $5, %eax                   /* Enable NE */
        btsl $16, %eax                  /* Enable WP */
        btsl $18, %eax                  /* Enable AM */
        /* Make changes effective */
        movl %eax, %cr0
        /* At this point:
                CR4.PAE must be 1
                CS.L must be 0
                CR3 must point to the PML4
                The next instruction must be a branch
                This must be on an identity-mapped page
        */
        jmp reach_compatibility_mode
reach_compatibility_mode:
        movw $0x0e00 + 'i', %ds:(0xb8012)
        movb $0xa8, %al ; outb %al, $0x80

        /*
         * At this point we're in long mode but in 32-bit compatibility mode
         * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
         * EFER.LMA = 1).  Now we want to jump into 64-bit mode; to do that we
         * load a new GDT/IDT whose __KERNEL_CS has CS.L = 1.
         */

        movw $0x0e00 + 'n', %ds:(0xb8014)
        movb $0xa9, %al ; outb %al, $0x80

        /* Load the new GDT with the 64-bit segment, using a 32-bit descriptor */
        movl $(pGDT32 - __START_KERNEL_map), %eax
        lgdt (%eax)

        movl $(wakeup_jumpvector - __START_KERNEL_map), %eax
        /* Finally jump into 64-bit mode */
        ljmp *(%eax)

wakeup_jumpvector:
        .long wakeup_long64 - __START_KERNEL_map
        .word __KERNEL_CS
        .code64

        /* Hooray, we are in 64-bit long mode (but still running in low memory) */
wakeup_long64:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel will no longer have access to the low
         * (userspace) addresses we are currently running at.  We have to do
         * that here because in 32-bit mode we could not load a 64-bit linear
         * address.
         */
        lgdt cpu_gdt_descr - __START_KERNEL_map

        movw $0x0e00 + 'u', %ds:(0xb8016)

        nop
        nop
        movw $__KERNEL_DS, %ax
        movw %ax, %ss
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movq saved_esp, %rsp

        movw $0x0e00 + 'x', %ds:(0xb8018)
        movq saved_ebx, %rbx
        movq saved_edi, %rdi
        movq saved_esi, %rsi
        movq saved_ebp, %rbp

        movw $0x0e00 + '!', %ds:(0xb801a)
        movq saved_eip, %rax
        jmp *%rax
        .code32

        .align 64
gdta:
        .word 0, 0, 0, 0                # dummy
        .word 0, 0, 0, 0                # unused

        .word 0xFFFF                    # 4Gb - (0x100000*0x1000 = 4Gb)
        .word 0                         # base address = 0
        .word 0x9B00                    # code read/exec.  ??? Why do I need
                                        #  0x9B00 (as opposed to 0x9A00) for
                                        #  this to work?
        .word 0x00CF                    # granularity = 4096, 386
                                        #  (+5th nibble of limit)

        .word 0xFFFF                    # 4Gb - (0x100000*0x1000 = 4Gb)
        .word 0                         # base address = 0
        .word 0x9200                    # data read/write
        .word 0x00CF                    # granularity = 4096, 386
                                        #  (+5th nibble of limit)

        # this is the 64-bit descriptor for code
        .word 0xFFFF
        .word 0
        .word 0x9A00                    # code read/exec
        .word 0x00AF                    # as above, but it is long mode and with D=0
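                                        # (flags byte 0xAF: G=1, D/B=0,
                                        #  L=1 (64-bit), limit[19:16] = 0xF)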
idt_48a:
        .word 0                         # idt limit = 0
        .word 0, 0                      # idt base = 0L

gdt_48a:
        .word 0x8000                    # gdt limit = 0x8000
                                        #  (room for 4096 8-byte GDT entries)
        .word 0, 0                      # gdt base (filled in later)
real_save_gdt:  .word 0
                .quad 0
real_magic:     .quad 0
video_mode:     .quad 0
video_flags:    .quad 0

bogus_real_magic:
        movb $0xba, %al ; outb %al, $0x80
        jmp bogus_real_magic

bogus_32_magic:
        movb $0xb3, %al ; outb %al, $0x80
        jmp bogus_32_magic

bogus_31_magic:
        movb $0xb1, %al ; outb %al, $0x80
        jmp bogus_31_magic

bogus_cpu:
        movb $0xbc, %al ; outb %al, $0x80
        jmp bogus_cpu
/* This code uses an extended set of video mode numbers.  These include:
 * Aliases for standard modes
 *      NORMAL_VGA (-1)
 *      EXTENDED_VGA (-2)
 *      ASK_VGA (-3)
 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
 * of compatibility when extending the table.  These are between 0x00 and 0xff.
 */
#define VIDEO_FIRST_MENU 0x0000

/* Standard BIOS video modes (BIOS number + 0x0100) */
#define VIDEO_FIRST_BIOS 0x0100

/* VESA BIOS video modes (VESA number + 0x0200) */
#define VIDEO_FIRST_VESA 0x0200

/* Video7 special modes (BIOS number + 0x0900) */
#define VIDEO_FIRST_V7 0x0900

# Set the requested video mode (AX = mode ID); returns with CF set on success.
mode_seta:
        movw %ax, %bx
#if 0
        cmpb $0xff, %ah
        jz setalias

        testb $VIDEO_RECALC>>8, %ah
        jnz _setrec

        cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah
        jnc setres

        cmpb $VIDEO_FIRST_SPECIAL>>8, %ah
        jz setspc

        cmpb $VIDEO_FIRST_V7>>8, %ah
        jz setv7
#endif

        cmpb $VIDEO_FIRST_VESA>>8, %ah
        jnc check_vesaa
#if 0
        orb %ah, %ah
        jz setmenu
#endif

        decb %ah
#       jz setbios                      Add bios modes later

setbada: clc
        ret

check_vesaa:
        subb $VIDEO_FIRST_VESA>>8, %bh
        orw $0x4000, %bx                # Use linear frame buffer
        movw $0x4f02, %ax               # VESA BIOS mode set call
        int $0x10
        cmpw $0x004f, %ax               # AL=4f if implemented
        jnz _setbada                    # AH=0 if OK

        stc
        ret

_setbada: jmp setbada
        .code64
bogus_magic:
        movw $0x0e00 + 'B', %ds:(0xb8018)
        jmp bogus_magic

bogus_magic2:
        movw $0x0e00 + '2', %ds:(0xb8018)
        jmp bogus_magic2


wakeup_stack_begin:     # Stack grows down

.org    0xff0
wakeup_stack:           # Just below end of page

ENTRY(wakeup_end)

##
# acpi_copy_wakeup_routine
#
# Copy the above routine to low memory.
#
# Parameters:
# %rdi: place to copy wakeup routine to
#
# Returned address is location of code in low memory (past data and stack)
#
ENTRY(acpi_copy_wakeup_routine)
        pushq %rax
        pushq %rcx
        pushq %rdx

        sgdt saved_gdt
        sidt saved_idt
        sldt saved_ldt
        str saved_tss

        movq %cr3, %rdx
        movq %rdx, saved_cr3
        movq %cr4, %rdx
        movq %rdx, saved_cr4
        movq %cr0, %rdx
        movq %rdx, saved_cr0
        sgdt real_save_gdt - wakeup_start (,%rdi)
        movl $MSR_EFER, %ecx
        rdmsr
        movl %eax, saved_efer
        movl %edx, saved_efer2

        movl saved_video_mode, %edx
        movl %edx, video_mode - wakeup_start (,%rdi)
        movl acpi_video_flags, %edx
        movl %edx, video_flags - wakeup_start (,%rdi)
        movq $0x12345678, real_magic - wakeup_start (,%rdi)
        movq $0x123456789abcdef0, %rdx
        movq %rdx, saved_magic
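        # (real_magic (0x12345678) is checked by the real-mode stub and
        #  saved_magic's low dword (0x9abcdef0) by the 32-bit stub above, so a
        #  missing or corrupted copy of the wakeup code/data is caught early.
        #  The re-read just below goes through saved_magic's low
        #  (- __START_KERNEL_map) alias as an extra consistency check.)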
        movl saved_magic - __START_KERNEL_map, %eax
        cmpl $0x9abcdef0, %eax
        jne bogus_32_magic

        # make sure %cr4 is set correctly (features, etc)
        movl saved_cr4 - __START_KERNEL_map, %eax
        movq %rax, %cr4
        movl saved_cr0 - __START_KERNEL_map, %eax
        movq %rax, %cr0
        jmp 1f                          # Flush pipelines
1:
        # restore the regs we used
        popq %rdx
        popq %rcx
        popq %rax
ENTRY(do_suspend_lowlevel_s4bios)
        ret

        .align 2
        .p2align 4,,15
.globl do_suspend_lowlevel
        .type do_suspend_lowlevel,@function
do_suspend_lowlevel:
.LFB5:
        subq $8, %rsp
        xorl %eax, %eax
        call save_processor_state

        movq %rsp, saved_context_esp(%rip)
        movq %rax, saved_context_eax(%rip)
        movq %rbx, saved_context_ebx(%rip)
        movq %rcx, saved_context_ecx(%rip)
        movq %rdx, saved_context_edx(%rip)
        movq %rbp, saved_context_ebp(%rip)
        movq %rsi, saved_context_esi(%rip)
        movq %rdi, saved_context_edi(%rip)
        movq %r8, saved_context_r08(%rip)
        movq %r9, saved_context_r09(%rip)
        movq %r10, saved_context_r10(%rip)
        movq %r11, saved_context_r11(%rip)
        movq %r12, saved_context_r12(%rip)
        movq %r13, saved_context_r13(%rip)
        movq %r14, saved_context_r14(%rip)
        movq %r15, saved_context_r15(%rip)
        pushfq ; popq saved_context_eflags(%rip)
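        # (.L97 is the resume point: wakeup_long64 restores the registers
        #  saved below and then does "movq saved_eip, %rax; jmp *%rax", so
        #  execution continues at .L97 after an S3 wakeup.)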
        movq $.L97, saved_eip(%rip)

        movq %rsp, saved_esp
        movq %rbp, saved_ebp
        movq %rbx, saved_ebx
        movq %rdi, saved_edi
        movq %rsi, saved_esi

        addq $8, %rsp
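        # (Tail-call acpi_enter_sleep_state(3): %edi = 3 selects ACPI state S3.
        #  If the call returns, the suspend failed and control goes straight
        #  back to do_suspend_lowlevel's caller; on a successful suspend,
        #  wakeup re-enters through wakeup_code above and lands at .L97.)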
        movl $3, %edi
        xorl %eax, %eax
        jmp acpi_enter_sleep_state
.L97:
        .p2align 4,,7
.L99:
        .align 4
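        # ($24 is 0x18, presumably the kernel data segment selector
        #  __KERNEL_DS.  The saved_context+NN loads below use hardcoded byte
        #  offsets into struct saved_context (judging by the destination
        #  registers, the cr4/cr3/cr2/cr0 fields) and must match that
        #  struct's layout.)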
        movl $24, %eax
        movw %ax, %ds
        movq saved_context+58(%rip), %rax
        movq %rax, %cr4
        movq saved_context+50(%rip), %rax
        movq %rax, %cr3
        movq saved_context+42(%rip), %rax
        movq %rax, %cr2
        movq saved_context+34(%rip), %rax
        movq %rax, %cr0
        pushq saved_context_eflags(%rip) ; popfq
        movq saved_context_esp(%rip), %rsp
        movq saved_context_ebp(%rip), %rbp
        movq saved_context_eax(%rip), %rax
        movq saved_context_ebx(%rip), %rbx
        movq saved_context_ecx(%rip), %rcx
        movq saved_context_edx(%rip), %rdx
        movq saved_context_esi(%rip), %rsi
        movq saved_context_edi(%rip), %rdi
        movq saved_context_r08(%rip), %r8
        movq saved_context_r09(%rip), %r9
        movq saved_context_r10(%rip), %r10
        movq saved_context_r11(%rip), %r11
        movq saved_context_r12(%rip), %r12
        movq saved_context_r13(%rip), %r13
        movq saved_context_r14(%rip), %r14
        movq saved_context_r15(%rip), %r15
        xorl %eax, %eax
        addq $8, %rsp
        jmp restore_processor_state
.LFE5:
.Lfe5:
        .size do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel

        .data
ALIGN
ENTRY(saved_ebp)        .quad 0
ENTRY(saved_esi)        .quad 0
ENTRY(saved_edi)        .quad 0
ENTRY(saved_ebx)        .quad 0

ENTRY(saved_eip)        .quad 0
ENTRY(saved_esp)        .quad 0

ENTRY(saved_magic)      .quad 0

ALIGN
# saved registers
saved_gdt:      .quad 0,0
saved_idt:      .quad 0,0
saved_ldt:      .quad 0
saved_tss:      .quad 0

saved_cr0:      .quad 0
saved_cr3:      .quad 0
saved_cr4:      .quad 0
saved_efer:     .quad 0
saved_efer2:    .quad 0