suspend_asm_64.S

/* Copyright 2004,2005 Pavel Machek <pavel@suse.cz>, Andi Kleen <ak@suse.de>, Rafael J. Wysocki <rjw@sisk.pl>
 *
 * Distribute under GPLv2.
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 * It is rewriting one kernel image with another.  What is a stack page in the
 * "old" image could very well be a data page in the "new" image, and
 * overwriting your own stack out from under yourself is a bad idea.
 */
	.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
ENTRY(swsusp_arch_suspend)
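	/* save the general purpose registers and the stack pointer */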
	movq	%rsp, saved_context_esp(%rip)
	movq	%rax, saved_context_eax(%rip)
	movq	%rbx, saved_context_ebx(%rip)
	movq	%rcx, saved_context_ecx(%rip)
	movq	%rdx, saved_context_edx(%rip)
	movq	%rbp, saved_context_ebp(%rip)
	movq	%rsi, saved_context_esi(%rip)
	movq	%rdi, saved_context_edi(%rip)
	movq	%r8, saved_context_r08(%rip)
	movq	%r9, saved_context_r09(%rip)
	movq	%r10, saved_context_r10(%rip)
	movq	%r11, saved_context_r11(%rip)
	movq	%r12, saved_context_r12(%rip)
	movq	%r13, saved_context_r13(%rip)
	movq	%r14, saved_context_r14(%rip)
	movq	%r15, saved_context_r15(%rip)
	pushfq ; popq saved_context_eflags(%rip)

	/* save the address of restore_registers */
	movq	$restore_registers, %rax
	movq	%rax, restore_jump_address(%rip)

	call	swsusp_save
	ret

ENTRY(restore_image)
	/* switch to temporary page tables */
	movq	$__PAGE_OFFSET, %rdx
	movq	temp_level4_pgt(%rip), %rax
	subq	%rdx, %rax
	movq	%rax, %cr3
	/* Flush TLB */
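	/* (clearing CR4.PGE also invalidates global TLB entries, which a CR3 reload alone would not) */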
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(1<<7), %rdx		# PGE
	movq	%rdx, %cr4		# turn off PGE
	movq	%cr3, %rcx		# flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4		# turn PGE back on

	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %rax

	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx

	/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
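	/* on entry: %rdx points to the first pbe, %rax holds the restore_registers address */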
loop:
	testq	%rdx, %rdx
	jz	done

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
	movq	$(PAGE_SIZE >> 3), %rcx
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	loop

done:
	/* jump to the restore_registers address from the image header */
	jmpq	*%rax
	/*
	 * NOTE: This assumes that the boot kernel's text mapping covers the
	 * image kernel's page containing restore_registers and that the
	 * address of this page is the same as in the image kernel's text
	 * mapping (this should always be true, because the text mapping is
	 * linear, starting from 0, and is supposed to cover the entire kernel
	 * text for every kernel).
	 *
	 * The code below belongs to the image kernel.
	 */
ENTRY(restore_registers)
	/* go back to the original page tables */
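	/* (init_level4_pgt is a kernel virtual address; convert it to a physical address for CR3) */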
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Flush TLB, including "global" things (vmalloc) */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(1<<7), %rdx		# PGE
	movq	%rdx, %cr4		# turn off PGE
	movq	%cr3, %rcx		# flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4		# turn PGE back on
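	/* restore the stack pointer and frame pointer saved at suspend time */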
	movq	saved_context_esp(%rip), %rsp
	movq	saved_context_ebp(%rip), %rbp

	/* restore GPRs (we don't restore %rax, it must be 0 anyway) */
	movq	saved_context_ebx(%rip), %rbx
	movq	saved_context_ecx(%rip), %rcx
	movq	saved_context_edx(%rip), %rdx
	movq	saved_context_esi(%rip), %rsi
	movq	saved_context_edi(%rip), %rdi
	movq	saved_context_r08(%rip), %r8
	movq	saved_context_r09(%rip), %r9
	movq	saved_context_r10(%rip), %r10
	movq	saved_context_r11(%rip), %r11
	movq	saved_context_r12(%rip), %r12
	movq	saved_context_r13(%rip), %r13
	movq	saved_context_r14(%rip), %r14
	movq	saved_context_r15(%rip), %r15
	pushq	saved_context_eflags(%rip) ; popfq

	xorq	%rax, %rax

	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

	ret