@@ -210,7 +210,7 @@ ENTRY(native_usergs_sysret64)
 
 /* %rsp:at FRAMEEND */
 .macro FIXUP_TOP_OF_STACK tmp offset=0
-	movq %gs:pda_oldrsp,\tmp
+	movq PER_CPU_VAR(old_rsp),\tmp
 	movq \tmp,RSP+\offset(%rsp)
 	movq $__USER_DS,SS+\offset(%rsp)
 	movq $__USER_CS,CS+\offset(%rsp)
@@ -221,7 +221,7 @@ ENTRY(native_usergs_sysret64)
 
 .macro RESTORE_TOP_OF_STACK tmp offset=0
 	movq RSP+\offset(%rsp),\tmp
-	movq \tmp,%gs:pda_oldrsp
+	movq \tmp,PER_CPU_VAR(old_rsp)
 	movq EFLAGS+\offset(%rsp),\tmp
 	movq \tmp,R11+\offset(%rsp)
 .endm
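Every hunk in this patch makes the same mechanical substitution: a hand-written %gs-relative access to a field of the old struct x8664_pda becomes a reference to an ordinary per-cpu variable through PER_CPU_VAR(). A minimal sketch of what that macro boils down to on x86-64, assuming the contemporary conventions of arch/x86/include/asm/percpu.h (the per_cpu__ symbol prefix and the CONFIG_SMP split are recalled from that era, not quoted from this patch):

	/* sketch of PER_CPU_VAR(), not verbatim */
	#ifdef CONFIG_SMP
	/* %gs points at this CPU's per-cpu area, so a per-cpu variable
	 * is just a segment-relative symbol reference */
	#define PER_CPU_VAR(var)	%gs:per_cpu__##var
	#else
	/* UP: one copy of each variable, a plain absolute reference */
	#define PER_CPU_VAR(var)	per_cpu__##var
	#endif

The generated code is therefore unchanged: movq PER_CPU_VAR(old_rsp),\tmp still assembles to a single %gs-relative move. What changes is that old_rsp is now a normal DEFINE_PER_CPU variable instead of a field pinned at a fixed offset inside the PDA.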
@@ -337,15 +337,15 @@ ENTRY(save_args)
 	je 1f
 	SWAPGS
 	/*
-	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * irq_count is used to check if a CPU is already on an interrupt stack
 	 * or not. While this is essentially redundant with preempt_count it is
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-1:	incl %gs:pda_irqcount
+1:	incl PER_CPU_VAR(irq_count)
 	jne 2f
 	popq_cfi %rax			/* move return address... */
-	mov %gs:pda_irqstackptr,%rsp
+	mov PER_CPU_VAR(irq_stack_ptr),%rsp
 	EMPTY_FRAME 0
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
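The comment in this hunk spells out the convention the incl/jne pair relies on; note that its reference to "the PDA" is historical once the patch is applied, since the counter now lives in ordinary per-cpu data. On the C side, the two variables the hunk names would be declared roughly as below (a sketch; the -1 initializer is the load-bearing detail, because incl then sets ZF exactly when the count goes from -1 to 0, i.e. on the outermost entry, and only that fall-through path switches %rsp to the IRQ stack):

	/* sketch of the per-cpu variables behind this hunk */
	DEFINE_PER_CPU(unsigned int, irq_count) = -1;	/* -1: not on the IRQ stack */
	DEFINE_PER_CPU(char *, irq_stack_ptr);		/* top of this CPU's IRQ stack */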
@@ -468,7 +468,7 @@ END(ret_from_fork)
 ENTRY(system_call)
 	CFI_STARTPROC	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
+	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
@@ -479,8 +479,8 @@ ENTRY(system_call)
 	 */
 ENTRY(system_call_after_swapgs)
 
-	movq	%rsp,%gs:pda_oldrsp
-	movq	%gs:pda_kernelstack,%rsp
+	movq	%rsp,PER_CPU_VAR(old_rsp)
+	movq	PER_CPU_VAR(kernel_stack),%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
@@ -523,7 +523,7 @@ sysret_check:
 	CFI_REGISTER	rip,rcx
 	RESTORE_ARGS 0,-ARG_SKIP,1
 	/*CFI_REGISTER	rflags,r11*/
-	movq	%gs:pda_oldrsp, %rsp
+	movq	PER_CPU_VAR(old_rsp), %rsp
 	USERGS_SYSRET64
 
 	CFI_RESTORE_STATE
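These two system_call hunks are the two ends of one handoff: the SYSCALL instruction does not switch stacks, so the entry path stashes the user %rsp in old_rsp and loads the per-cpu kernel_stack, and sysret_check loads old_rsp back immediately before USERGS_SYSRET64. KERNEL_STACK_OFFSET takes over the role of PDA_STACKOFFSET: kernel_stack is kept that much below the true top of the task stack so the entry path can skip a subtraction. A sketch of the C side, with the (5*8) value assumed to mirror the PDA_STACKOFFSET it replaces:

	/* sketch of the declarations behind the syscall fast path */
	#define KERNEL_STACK_OFFSET (5*8)		/* assumed, mirrors PDA_STACKOFFSET */
	DEFINE_PER_CPU(unsigned long, old_rsp);		/* user %rsp, live across the syscall */
	DEFINE_PER_CPU(unsigned long, kernel_stack);	/* task stack top - KERNEL_STACK_OFFSET */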
@@ -833,11 +833,11 @@ common_interrupt:
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
-	/* 0(%rsp): oldrsp-ARGOFFSET */
+	/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
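The decl here is the counterpart of the incl in save_args: by this point leaveq has already restored the pre-interrupt %rsp through the saved frame pointer, so dropping the count (back to -1 in the outermost case) records that this CPU has left the IRQ stack.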
@@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
 	CFI_REL_OFFSET rbp,0
 	mov  %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	incl %gs:pda_irqcount
-	cmove %gs:pda_irqstackptr,%rsp
+	incl PER_CPU_VAR(irq_count)
+	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET   -8
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
 END(call_softirq)
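call_softirq uses a branchless variant of the same bookkeeping: incl sets ZF when irq_count wraps from -1 to 0, and cmove then installs irq_stack_ptr into %rsp only in that outermost case, while a nested caller keeps its current stack. A stand-alone C model of just the counter logic (hypothetical names, illustrating only the flag convention; the real stack switch is the cmove itself):

	/* model of the incl/cmove idiom; irq_count is per-CPU in the kernel */
	#include <stdio.h>

	static unsigned int irq_count = -1;	/* -1: not on the IRQ stack */

	static int outermost_entry(void)
	{
		/* incl: ZF is set iff the result is 0, i.e. we were at -1 */
		return ++irq_count == 0;	/* nonzero => cmove fires */
	}

	int main(void)
	{
		printf("%d\n", outermost_entry());	/* 1: switch stacks */
		printf("%d\n", outermost_entry());	/* 0: already switched */
		--irq_count;				/* decl on each return path */
		--irq_count;				/* back to -1 */
		return 0;
	}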
@@ -1297,15 +1297,15 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
 	DEFAULT_FRAME
-11:	incl %gs:pda_irqcount
+11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
-	cmovzq %gs:pda_irqstackptr,%rsp
+	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp			# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
 	CFI_DEF_CFA_REGISTER rsp
-	decl %gs:pda_irqcount
+	decl PER_CPU_VAR(irq_count)
 	jmp  error_exit
 	CFI_ENDPROC
 END(do_hypervisor_callback)
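The Xen callback closes out the same pattern with the condition spelled explicitly: cmovzq is the quadword conditional move on the zero flag from the 11: incl above, and the popq %rsp / decl pair undoes the switch on the way out to error_exit.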