cpu_64.c

/*
 * Suspend and hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/xcr.h>

static void fix_processor_context(void);

struct saved_context saved_context;

/**
 * __save_processor_state - save CPU registers before creating a
 *      hibernation image and before restoring the memory state from it
 * @ctxt - structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() must save all registers needed by
 * kernel A, so that it can operate correctly after the resume regardless
 * of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
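        /*
         * Save the current FPU context before anything else; do_fpu_end()
         * in the restore path is the matching counterpart.
         */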
        kernel_fpu_begin();

        /*
         * descriptor tables
         */
        store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
        store_idt((struct desc_ptr *)&ctxt->idt_limit);
        store_tr(ctxt->tr);

        /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */

        /*
         * segment registers
         */
        asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
        asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
        asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
        asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
        asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
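        /*
         * On x86-64 the FS and GS base addresses live in MSRs and are not
         * covered by the selector saves above, so read them out explicitly.
         */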
        rdmsrl(MSR_FS_BASE, ctxt->fs_base);
        rdmsrl(MSR_GS_BASE, ctxt->gs_base);
        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
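        /*
         * Snapshot the fixed-range MTRRs so they can be reprogrammed on
         * this CPU during resume.
         */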
        mtrr_save_fixed_ranges(NULL);

        /*
         * control registers
         */
        rdmsrl(MSR_EFER, ctxt->efer);
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
        ctxt->cr4 = read_cr4();
        ctxt->cr8 = read_cr8();
}

void save_processor_state(void)
{
        __save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
        /*
         * Restore FPU regs if necessary
         */
        kernel_fpu_end();
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *      by __save_processor_state()
 * @ctxt - structure to load the register contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
        /*
         * control registers
         */
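        /*
         * EFER is restored first so that mode bits such as NXE are already
         * in effect when the control registers below are rewritten.
         */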
        wrmsrl(MSR_EFER, ctxt->efer);
        write_cr8(ctxt->cr8);
        write_cr4(ctxt->cr4);
        write_cr3(ctxt->cr3);
        write_cr2(ctxt->cr2);
        write_cr0(ctxt->cr0);

        /*
         * now restore the descriptor tables to their proper values
         * ltr is done in fix_processor_context().
         */
        load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
        load_idt((const struct desc_ptr *)&ctxt->idt_limit);

        /*
         * segment registers
         */
        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
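        /*
         * %gs must go through load_gs_index() so the selector write does
         * not clobber the kernel's GS base; both bases are rewritten from
         * the saved MSR values just below.
         */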
        load_gs_index(ctxt->gs);
        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

        /*
         * restore XCR0 for XSAVE-capable CPUs
         */
        if (cpu_has_xsave)
                xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

        fix_processor_context();

        do_fpu_end();
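        /*
         * Reprogram this CPU's MTRRs from the state saved by
         * mtrr_save_fixed_ranges() before hibernation.
         */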
        mtrr_ap_init();
}

void restore_processor_state(void)
{
        __restore_processor_state(&saved_context);
}

static void fix_processor_context(void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &per_cpu(init_tss, cpu);

        /*
         * This just modifies memory; should not be necessary. But... This
         * is necessary, because 386 hardware has concept of busy TSS or some
         * similar stupidity.
         */
        set_tss_desc(cpu, t);
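        /*
         * Force the TSS descriptor back to type 9 (available TSS): the ltr
         * in load_TR_desc() below faults if the busy bit is set.
         */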
        get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

        syscall_init();                         /* This sets MSR_*STAR and related */
        load_TR_desc();                         /* This does ltr */
        load_LDT(&current->active_mm->context); /* This does lldt */

        /*
         * Now maybe reload the debug registers
         */
        if (current->thread.debugreg7) {
                loaddebug(&current->thread, 0);
                loaddebug(&current->thread, 1);
                loaddebug(&current->thread, 2);
                loaddebug(&current->thread, 3);
                /* no 4 and 5 */
                loaddebug(&current->thread, 6);
                loaddebug(&current->thread, 7);
        }
}