lguest_asm.S

#include <linux/linkage.h>
#include <linux/lguest.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/processor-flags.h>

/*G:020 This is where we begin: we have a magic signature which the launcher
 * looks for. The plan is that the Linux boot protocol will be extended with a
 * "platform type" field which will guide us here from the normal entry point,
 * but for the moment this suffices. The normal boot code uses %esi for the
 * boot header, so we do too. We convert it to a virtual address by adding
 * PAGE_OFFSET, and hand it to lguest_init() as its argument (ie. %eax).
 *
 * The .section line puts this code in .init.text so it will be discarded after
 * boot. */
.section .init.text, "ax", @progbits
.ascii "GenuineLguest"
	/* Set up initial stack. */
	movl $(init_thread_union+THREAD_SIZE),%esp
	movl %esi, %eax
	addl $__PAGE_OFFSET, %eax
	jmp lguest_init
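
/* A minimal sketch of the receiving side, to make the handoff above concrete.
 * It assumes the usual 32-bit kernel build with -mregparm=3, which is what
 * lets "jmp lguest_init" behave like a call whose first argument (the boot
 * header, now a virtual address) is already sitting in %eax.  The body is
 * illustrative only, not the real lguest_init():
 *
 *	__init void lguest_init(void *boot)
 *	{
 *		// 'boot' is the header the launcher left in %esi, shifted up
 *		// by PAGE_OFFSET via the addl above.  A real implementation
 *		// copies it into the kernel's own boot parameters and then
 *		// carries on with paravirt setup.
 *	}
 */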

/*G:055 We create a macro which puts the assembler code between lgstart_ and
 * lgend_ markers. These templates end up in the .init.text section, so they
 * are discarded after boot. */
#define LGUEST_PATCH(name, insns...)			\
	lgstart_##name: insns; lgend_##name:;		\
	.globl lgstart_##name; .globl lgend_##name

LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
/*:*/
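
/* For orientation, here is roughly how the Guest's C boot code could consume
 * these lgstart_/lgend_ markers.  Treat it as a sketch rather than a faithful
 * copy of lguest's paravirt patching: the idea is simply "copy the bytes
 * between lgstart_x and lgend_x over the indirect call site, if they fit".
 * The table and helper names below are hypothetical:
 *
 *	extern const char lgstart_cli[], lgend_cli[], lgstart_sti[],
 *		lgend_sti[], lgstart_popf[], lgend_popf[],
 *		lgstart_pushf[], lgend_pushf[];
 *
 *	struct lguest_insn_range {
 *		const char *start, *end;
 *	};
 *
 *	// One inline template per paravirt operation we patch.
 *	static const struct lguest_insn_range lguest_insns[] = {
 *		{ lgstart_cli,   lgend_cli   },
 *		{ lgstart_sti,   lgend_sti   },
 *		{ lgstart_popf,  lgend_popf  },
 *		{ lgstart_pushf, lgend_pushf },
 *	};
 *
 *	static unsigned lguest_patch_one(unsigned idx, void *site, unsigned len)
 *	{
 *		unsigned insn_len = lguest_insns[idx].end - lguest_insns[idx].start;
 *
 *		if (insn_len > len)	// template doesn't fit: leave the
 *			return 0;	// indirect call in place instead
 *		memcpy(site, lguest_insns[idx].start, insn_len);
 *		return insn_len;
 *	}
 */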

.text
/* These demarcate the EIP range where the Host should never deliver
 * interrupts. */
.global lguest_noirq_start
.global lguest_noirq_end

/*G:045 There is one final paravirt_op that the Guest implements, and glancing
 * at it you can see why I left it to last. It's *cool*! It's in *assembler*!
 *
 * The "iret" instruction is used to return from an interrupt or trap. The
 * stack looks like this:
 *   old address
 *   old code segment & privilege level
 *   old processor flags ("eflags")
 *
 * The "iret" instruction pops those values off the stack and restores them all
 * at once. The only problem is that eflags includes the Interrupt Flag which
 * the Guest can't change: the CPU will simply ignore it when we do an "iret".
 * So we have to copy eflags from the stack to lguest_data.irq_enabled before
 * we do the "iret".
 *
 * There are two problems with this: firstly, we need to use a register to do
 * the copy and secondly, the whole thing needs to be atomic. The first
 * problem is easy to solve: push %eax on the stack so we can use it, and then
 * restore it at the end just before the real "iret".
 *
 * The second is harder: copying eflags to lguest_data.irq_enabled will turn
 * interrupts on before we're finished, so we could be interrupted before we
 * return to userspace or wherever. Our solution to this is to surround the
 * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
 * Host that it is *never* to interrupt us there, even if interrupts seem to be
 * enabled. */
ENTRY(lguest_iret)
	pushl %eax
	movl 12(%esp), %eax
lguest_noirq_start:
	/* Note the %ss: segment prefix here. Normal data accesses use the
	 * "ds" segment, but that will have already been restored for whatever
	 * we're returning to (such as userspace): we can't trust it. The %ss:
	 * prefix makes sure we use the stack segment, which is still valid. */
	movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
	popl %eax
	iret
lguest_noirq_end:
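
/* How this gets wired up on the Guest side, as a hedged sketch (the exact
 * hook and field names vary between kernel versions, so take them as
 * approximations): the C boot code points the paravirt iret hook at
 * lguest_iret, and describes the interrupt-free window to the Host by
 * recording the two labels in lguest_data before registering it:
 *
 *	// Sketch only; lguest_setup_iret is a hypothetical helper.
 *	extern void lguest_iret(void);
 *	extern char lguest_noirq_start[], lguest_noirq_end[];
 *
 *	static __init void lguest_setup_iret(void)
 *	{
 *		pv_cpu_ops.iret = lguest_iret;
 *		lguest_data.noirq_start = (u32)lguest_noirq_start;
 *		lguest_data.noirq_end   = (u32)lguest_noirq_end;
 *	}
 */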