suspend_asm_64.S

/* Copyright 2004,2005 Pavel Machek <pavel@suse.cz>, Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl>
 *
 * Distribute under GPLv2.
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables
 * while copying pages:
 *
 * It is rewriting one kernel image with another. What is a stack page
 * in the "old" image could very well be a data page in the "new" image,
 * and overwriting your own stack out from under yourself is a bad idea.
 */
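/*
 * Concretely: even a single "call" pushes a return address through
 * %rsp, and if %rsp points into a page that the copy loop has already
 * replaced with "new" image data, that push corrupts the restored
 * image (and the matching "ret" would jump to garbage).
 */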
	.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
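/*
 * The pt_regs_* and pbe_* constants used below are byte offsets
 * generated at build time by the asm-offsets mechanism (asm-offsets.c),
 * so this assembly stays in sync with the C structure layouts.
 */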
ENTRY(swsusp_arch_suspend)
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_rsp(%rax)
	movq	%rbp, pt_regs_rbp(%rax)
	movq	%rsi, pt_regs_rsi(%rax)
	movq	%rdi, pt_regs_rdi(%rax)
	movq	%rbx, pt_regs_rbx(%rax)
	movq	%rcx, pt_regs_rcx(%rax)
	movq	%rdx, pt_regs_rdx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_eflags(%rax)

	/* save the address of restore_registers */
	movq	$restore_registers, %rax
	movq	%rax, restore_jump_address(%rip)
	/* save cr3 */
	movq	%cr3, %rax
	movq	%rax, restore_cr3(%rip)

	call	swsusp_save
	ret
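/*
 * Rough C-level sketch of the function above (saved_context,
 * restore_jump_address and restore_cr3 are the variables this file
 * references; swsusp_save() builds the hibernation image):
 *
 *	save GP registers and RFLAGS into saved_context;
 *	restore_jump_address = (unsigned long)&restore_registers;
 *	restore_cr3 = read_cr3();
 *	return swsusp_save();
 */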
ENTRY(restore_image)
	/* switch to temporary page tables */
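	/*
	 * temp_level4_pgt holds a kernel virtual address; %cr3 needs
	 * the physical address, hence the __PAGE_OFFSET subtraction.
	 */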
	movq	$__PAGE_OFFSET, %rdx
	movq	temp_level4_pgt(%rip), %rax
	subq	%rdx, %rax
	movq	%rax, %cr3
	/* Flush TLB */
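	/*
	 * Reloading %cr3 flushes only non-global TLB entries; kernel
	 * mappings are global when CR4.PGE is set, so PGE is cleared
	 * around the reload to force those entries out as well.
	 */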
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(1<<7), %rdx		# PGE
	movq	%rdx, %cr4		# turn off PGE
	movq	%cr3, %rcx		# flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4		# turn PGE back on

	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %rax
	movq	restore_cr3(%rip), %rbx
	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx

	/* code below has been relocated to a safe page */
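/*
 * Register contract on entry (set up just above):
 *	%rdx - head of the restore_pblist pbe list
 *	%rax - address of restore_registers in the image kernel
 *	%rbx - the image kernel's %cr3, consumed by restore_registers
 * Only registers may carry state here; the stack is off limits.
 */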
ENTRY(core_restore_code)
loop:
	testq	%rdx, %rdx
	jz	done

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
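	/* copy one page: PAGE_SIZE bytes as PAGE_SIZE/8 quadwords */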
	movq	$(PAGE_SIZE >> 3), %rcx
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	loop
done:
	/* jump to the restore_registers address from the image header */
	jmpq	*%rax
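/*
 * Rough C equivalent of the loop above, assuming struct pbe from
 * <linux/suspend.h> (the same fields the pbe_* offsets refer to):
 *
 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 *		memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
 */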
/*
 * NOTE: This assumes that the boot kernel's text mapping covers the
 * image kernel's page containing restore_registers and that the
 * address of this page is the same as in the image kernel's text
 * mapping (this should always be true, because the text mapping is
 * linear, starting from 0, and is supposed to cover the entire kernel
 * text for every kernel).
 *
 * Code below belongs to the image kernel.
 */
ENTRY(restore_registers)
	/* go back to the original page tables */
	movq	%rbx, %cr3

	/* Flush TLB, including "global" things (vmalloc) */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(1<<7), %rdx		# PGE
	movq	%rdx, %cr4		# turn off PGE
	movq	%cr3, %rcx		# flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4		# turn PGE back on
	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	pt_regs_rsp(%rax), %rsp
	movq	pt_regs_rbp(%rax), %rbp
	movq	pt_regs_rsi(%rax), %rsi
	movq	pt_regs_rdi(%rax), %rdi
	movq	pt_regs_rbx(%rax), %rbx
	movq	pt_regs_rcx(%rax), %rcx
	movq	pt_regs_rdx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	pushq	pt_regs_eflags(%rax)
	popfq

	xorq	%rax, %rax

	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

	ret
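/*
 * The final ret unwinds through the %rsp saved by swsusp_arch_suspend,
 * so execution effectively resumes in the image kernel as if
 * swsusp_arch_suspend had just returned, with %rax = 0 as its return
 * value.
 */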