@@ -223,9 +223,7 @@ hyper_iret:
 	ds		}  SAVE_ALL state
 	eax		}
 	  :		:
-	ebx		}
-	----------------
-	return addr	 <- esp
+	ebx		}<- esp
 	----------------
 
 	In order to deliver the nested exception properly, we need to shift
@@ -240,10 +238,8 @@ hyper_iret:
 	it's usermode state which we eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
-	/* offsets +4 for return address */
-
 	/*
-	   Paranoia: Make sure we're really coming from userspace.
+	   Paranoia: Make sure we're really coming from kernel space.
 	   One could imagine a case where userspace jumps into the
 	   critical range address, but just before the CPU delivers a GP,
 	   it decides to deliver an interrupt instead.  Unlikely?
@@ -252,32 +248,32 @@ ENTRY(xen_iret_crit_fixup)
 	   jump instruction itself, not the destination, but some virtual
 	   environments get this wrong.
 	 */
-	movl PT_CS+4(%esp), %ecx
+	movl PT_CS(%esp), %ecx
 	andl $SEGMENT_RPL_MASK, %ecx
 	cmpl $USER_RPL, %ecx
 	je 2f
 
-	lea PT_ORIG_EAX+4(%esp), %esi
-	lea PT_EFLAGS+4(%esp), %edi
+	lea PT_ORIG_EAX(%esp), %esi
+	lea PT_EFLAGS(%esp), %edi
 
 	/* If eip is before iret_restore_end then stack
 	   hasn't been restored yet. */
 	cmp $iret_restore_end, %eax
 	jae 1f
 
-	movl 0+4(%edi),%eax		/* copy EAX */
-	movl %eax, PT_EAX+4(%esp)
+	movl 0+4(%edi),%eax		/* copy EAX (just above top of frame) */
+	movl %eax, PT_EAX(%esp)
 
 	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */
 
 	/* set up the copy */
 1:	std
-	mov $(PT_EIP+4) / 4, %ecx	/* copy ret+saved regs up to orig_eax */
+	mov $PT_EIP / 4, %ecx	/* saved regs up to orig_eax */
 	rep movsl
 	cld
 
 	lea 4(%edi),%esp		/* point esp to new frame */
-2:	ret
+2:	jmp xen_do_upcall
 
 
 ENTRY(xen_sysexit)