suspend_asm_64.S

/*
 * Copyright 2004,2005 Pavel Machek <pavel@suse.cz>,
 * Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl>
 *
 * Distribute under GPLv2.
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 * It is rewriting one kernel image with another.  What is a stack in the
 * "old" image could very well be a data page in the "new" image, and
 * overwriting your own stack under you is a bad idea.
 */
        .text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
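
/*
 * swsusp_arch_suspend: save the current CPU state - the general-purpose
 * registers and flags into saved_context, plus the address of
 * restore_registers and the current %cr3 for the image header - and then
 * call swsusp_save to create the hibernation image in memory.
 */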
ENTRY(swsusp_arch_suspend)
        movq    %rsp, saved_context_esp(%rip)
        movq    %rax, saved_context_eax(%rip)
        movq    %rbx, saved_context_ebx(%rip)
        movq    %rcx, saved_context_ecx(%rip)
        movq    %rdx, saved_context_edx(%rip)
        movq    %rbp, saved_context_ebp(%rip)
        movq    %rsi, saved_context_esi(%rip)
        movq    %rdi, saved_context_edi(%rip)
        movq    %r8,  saved_context_r08(%rip)
        movq    %r9,  saved_context_r09(%rip)
        movq    %r10, saved_context_r10(%rip)
        movq    %r11, saved_context_r11(%rip)
        movq    %r12, saved_context_r12(%rip)
        movq    %r13, saved_context_r13(%rip)
        movq    %r14, saved_context_r14(%rip)
        movq    %r15, saved_context_r15(%rip)
        pushfq ; popq saved_context_eflags(%rip)

        /* save the address of restore_registers */
        movq    $restore_registers, %rax
        movq    %rax, restore_jump_address(%rip)
        /* save cr3 */
        movq    %cr3, %rax
        movq    %rax, restore_cr3(%rip)

        call    swsusp_save
        ret
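
/*
 * restore_image: entered by the boot kernel to restore the hibernation
 * image.  It switches to the temporary page tables (temp_level4_pgt),
 * flushes the TLB, loads the values core_restore_code expects into
 * %rax, %rbx and %rdx, and jumps to the relocated copy of that code.
 */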
ENTRY(restore_image)
        /* switch to temporary page tables */
        movq    $__PAGE_OFFSET, %rdx
        movq    temp_level4_pgt(%rip), %rax
        subq    %rdx, %rax
        movq    %rax, %cr3
        /* Flush TLB */
        movq    mmu_cr4_features(%rip), %rax
        movq    %rax, %rdx
        andq    $~(1<<7), %rdx          # PGE
        movq    %rdx, %cr4;             # turn off PGE
        movq    %cr3, %rcx;             # flush TLB
        movq    %rcx, %cr3;
        movq    %rax, %cr4;             # turn PGE back on

        /* prepare to jump to the image kernel */
        movq    restore_jump_address(%rip), %rax
        movq    restore_cr3(%rip), %rbx

        /* prepare to copy image data to their original locations */
        movq    restore_pblist(%rip), %rdx
        movq    relocated_restore_code(%rip), %rcx
        jmpq    *%rcx
/* code below has been relocated to a safe page */
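/*
 * core_restore_code runs from a "safe" page (one that will not be
 * overwritten while the image is being restored), so that the copy loop
 * below cannot clobber the code it is executing.  On entry:
 *   %rax - address of restore_registers in the image kernel
 *   %rbx - %cr3 value saved by the image kernel
 *   %rdx - head of the restore_pblist list of page backup entries (pbe)
 */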
ENTRY(core_restore_code)
loop:
        testq   %rdx, %rdx
        jz      done

        /* get addresses from the pbe and copy the page */
        movq    pbe_address(%rdx), %rsi
        movq    pbe_orig_address(%rdx), %rdi
        movq    $(PAGE_SIZE >> 3), %rcx
        rep
        movsq

        /* progress to the next pbe */
        movq    pbe_next(%rdx), %rdx
        jmp     loop
done:
        /* jump to the restore_registers address from the image header */
        jmpq    *%rax
/*
 * NOTE: This assumes that the boot kernel's text mapping covers the
 * image kernel's page containing restore_registers and the address of
 * this page is the same as in the image kernel's text mapping (it
 * should always be true, because the text mapping is linear, starting
 * from 0, and is supposed to cover the entire kernel text for every
 * kernel).
 *
 * code below belongs to the image kernel
 */
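
/*
 * restore_registers: executed from the image kernel's text.  It switches
 * back to the image kernel's page tables (the %cr3 value passed in %rbx),
 * flushes the TLB including global entries, restores the registers and
 * flags saved by swsusp_arch_suspend, and clears in_suspend to tell the
 * hibernation core that memory has been restored.
 */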
ENTRY(restore_registers)
        /* go back to the original page tables */
        movq    %rbx, %cr3

        /* Flush TLB, including "global" things (vmalloc) */
        movq    mmu_cr4_features(%rip), %rax
        movq    %rax, %rdx
        andq    $~(1<<7), %rdx;         # PGE
        movq    %rdx, %cr4;             # turn off PGE
        movq    %cr3, %rcx;             # flush TLB
        movq    %rcx, %cr3
        movq    %rax, %cr4;             # turn PGE back on

        movq    saved_context_esp(%rip), %rsp
        movq    saved_context_ebp(%rip), %rbp
        /* restore GPRs (we don't restore %rax, it must be 0 anyway) */
        movq    saved_context_ebx(%rip), %rbx
        movq    saved_context_ecx(%rip), %rcx
        movq    saved_context_edx(%rip), %rdx
        movq    saved_context_esi(%rip), %rsi
        movq    saved_context_edi(%rip), %rdi
        movq    saved_context_r08(%rip), %r8
        movq    saved_context_r09(%rip), %r9
        movq    saved_context_r10(%rip), %r10
        movq    saved_context_r11(%rip), %r11
        movq    saved_context_r12(%rip), %r12
        movq    saved_context_r13(%rip), %r13
        movq    saved_context_r14(%rip), %r14
        movq    saved_context_r15(%rip), %r15
        pushq   saved_context_eflags(%rip) ; popfq

        xorq    %rax, %rax

        /* tell the hibernation core that we've just restored the memory */
        movq    %rax, in_suspend(%rip)

        ret