@@ -89,11 +89,11 @@ ENTRY(xen_iret)
 	 */
 #ifdef CONFIG_SMP
 	GET_THREAD_INFO(%eax)
-	movl TI_cpu(%eax), %eax
-	movl __per_cpu_offset(,%eax,4), %eax
-	mov xen_vcpu(%eax), %eax
+	movl %ss:TI_cpu(%eax), %eax
+	movl %ss:__per_cpu_offset(,%eax,4), %eax
+	mov %ss:xen_vcpu(%eax), %eax
 #else
-	movl xen_vcpu, %eax
+	movl %ss:xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
@@ -106,11 +106,11 @@ ENTRY(xen_iret)
 	 * resuming the code, so we don't have to be worried about
 	 * being preempted to another CPU.
 	 */
-	setz XEN_vcpu_info_mask(%eax)
+	setz %ss:XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
 	/* check for unmasked and pending */
-	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
 
 	/*
 	 * If there's something pending, mask events again so we can
@@ -118,7 +118,7 @@ xen_iret_start_crit:
 	 * touch XEN_vcpu_info_mask.
 	 */
 	jne 1f
-	movb $1, XEN_vcpu_info_mask(%eax)
+	movb $1, %ss:XEN_vcpu_info_mask(%eax)
 
 1:	popl %eax
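
Why the %ss: overrides change behavior: with no explicit segment prefix, an x86 memory operand whose base register is anything other than %esp or %ebp is addressed through %ds by default. Prefixing %ss: routes the access through the stack segment instead, presumably because %ss is the one data-capable segment register still guaranteed to hold a valid kernel selector on this exit path even if %ds/%es contain user values. A minimal sketch of the two forms, using a hypothetical symbol var:

	movl	var(%eax), %eax		# implicit segment: %ds:var(%eax)
	movl	%ss:var(%eax), %eax	# %ss override (0x36 prefix byte): %ss:var(%eax)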