@@ -34,7 +34,7 @@ MODULE_LICENSE("GPL");
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
 #define HOST_IS_64 0
@@ -71,7 +71,7 @@ static struct kvm_vmx_segment_field {
 };
 
 static const u32 vmx_msr_index[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
 	MSR_EFER, MSR_K6_STAR,
@@ -138,7 +138,7 @@ static u32 vmcs_read32(unsigned long field)
 
 static u64 vmcs_read64(unsigned long field)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	return vmcs_readl(field);
 #else
 	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
@@ -168,7 +168,7 @@ static void vmcs_write32(unsigned long field, u32 value)
 
 static void vmcs_write64(unsigned long field, u64 value)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(field, value);
 #else
 	vmcs_writel(field, value);
@@ -297,7 +297,7 @@ static void guest_write_tsc(u64 guest_tsc)
 
 static void reload_tss(void)
 {
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 
 	/*
 	 * VT restores TR but not its size. Useless.
@@ -328,7 +328,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	}
 
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		data = vmcs_readl(GUEST_FS_BASE);
 		break;
@@ -391,7 +391,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vmx_msr_entry *msr;
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
 		break;
@@ -726,7 +726,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
 {
@@ -768,7 +768,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
 		enter_rmode(vcpu);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
 			enter_lmode(vcpu);
@@ -809,7 +809,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->cr4 = cr4;
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
@@ -1096,7 +1096,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
 	rdmsrl(MSR_GS_BASE, a);
@@ -1174,7 +1174,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->cr0 = 0x60000010;
 	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
 	vmx_set_cr4(vcpu, 0);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmx_set_efer(vcpu, 0);
 #endif
 
@@ -1690,7 +1690,7 @@ again:
 		vmcs_write16(HOST_GS_SELECTOR, 0);
 	}
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1714,7 +1714,7 @@ again:
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"push %%rax; push %%rbx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
 		"push %%r8; push %%r9; push %%r10; push %%r11;"
@@ -1728,7 +1728,7 @@ again:
 		/* Check if vmlaunch of vmresume is needed */
 		"cmp $0, %1 \n\t"
 		/* Load guest registers. Don't clobber flags. */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %c[cr2](%3), %%rax \n\t"
 		"mov %%rax, %%cr2 \n\t"
 		"mov %c[rax](%3), %%rax \n\t"
@@ -1765,7 +1765,7 @@ again:
 		".globl kvm_vmx_return \n\t"
 		"kvm_vmx_return: "
 		/* Save guest registers, load host registers, keep flags */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"xchg %3, 0(%%rsp) \n\t"
 		"mov %%rax, %c[rax](%3) \n\t"
 		"mov %%rbx, %c[rbx](%3) \n\t"
@@ -1817,7 +1817,7 @@ again:
 		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
 		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
 		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
 		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1838,7 +1838,7 @@ again:
 	fx_save(vcpu->guest_fx_image);
 	fx_restore(vcpu->host_fx_image);
 
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 #endif
 
@@ -1856,7 +1856,7 @@ again:
 	 */
 	local_irq_disable();
 	load_gs(gs_sel);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
 	local_irq_enable();
@@ -1966,7 +1966,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
 #endif
 	.get_idt = vmx_get_idt,