@@ -39,6 +39,7 @@
 #include <asm/uaccess.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
+#include <asm/mtrr.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS					\
@@ -650,10 +651,38 @@ static bool msr_mtrr_valid(unsigned msr)
 
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	vcpu->arch.mtrr[msr - 0x200] = data;
+	if (msr == MSR_MTRRdefType) {
+		vcpu->arch.mtrr_state.def_type = data;
+		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
+	} else if (msr == MSR_MTRRfix64K_00000)
+		p[0] = data;
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		p[1 + msr - MSR_MTRRfix16K_80000] = data;
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
+	else if (msr == MSR_IA32_CR_PAT)
+		vcpu->arch.pat = data;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pt = data;
+	}
+
+	kvm_mmu_reset_context(vcpu);
 	return 0;
 }
 
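The fixed-range and variable-range index arithmetic in set_msr_mtrr() is terse, so here is a stand-alone sketch (illustration only, not part of the patch) that mirrors it with the architectural MSR numbers filled in: 0x250 for MTRRfix64K_00000, 0x258/0x259 for the 16K registers, 0x268-0x26f for the 4K registers, and base/mask pairs starting at 0x200. MTRRdefType (0x2ff) and IA32_CR_PAT (0x277) take separate branches in the patch and are not modeled here.

/*
 * Illustration only: maps an MTRR MSR number to the slot the patch
 * touches, i.e. one of the eleven u64 words overlaying
 * mtrr_state.fixed_ranges, or a var_ranges[] base/mask register.
 */
#include <stdio.h>

#define MSR_MTRRfix64K_00000	0x250
#define MSR_MTRRfix16K_80000	0x258
#define MSR_MTRRfix16K_A0000	0x259
#define MSR_MTRRfix4K_C0000	0x268
#define MSR_MTRRfix4K_F8000	0x26f

static void classify(unsigned msr)
{
	if (msr == MSR_MTRRfix64K_00000)
		printf("0x%x -> fixed_ranges word 0\n", msr);
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		printf("0x%x -> fixed_ranges word %u\n", msr,
		       1 + msr - MSR_MTRRfix16K_80000);
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		printf("0x%x -> fixed_ranges word %u\n", msr,
		       3 + msr - MSR_MTRRfix4K_C0000);
	else {
		/* Variable MTRRs: even offset = base, odd offset = mask. */
		unsigned idx = (msr - 0x200) / 2;
		unsigned is_mtrr_mask = msr - 0x200 - 2 * idx;

		printf("0x%x -> var_ranges[%u].%s\n", msr, idx,
		       is_mtrr_mask ? "mask" : "base");
	}
}

int main(void)
{
	classify(0x250);	/* fixed_ranges word 0  */
	classify(0x259);	/* fixed_ranges word 2  */
	classify(0x26f);	/* fixed_ranges word 10 */
	classify(0x203);	/* var_ranges[1].mask   */
	return 0;
}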
@@ -749,10 +778,37 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
+	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
+
 	if (!msr_mtrr_valid(msr))
 		return 1;
 
-	*pdata = vcpu->arch.mtrr[msr - 0x200];
+	if (msr == MSR_MTRRdefType)
+		*pdata = vcpu->arch.mtrr_state.def_type +
+			 (vcpu->arch.mtrr_state.enabled << 10);
+	else if (msr == MSR_MTRRfix64K_00000)
+		*pdata = p[0];
+	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
+		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
+	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
+		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
+	else if (msr == MSR_IA32_CR_PAT)
+		*pdata = vcpu->arch.pat;
+	else {	/* Variable MTRRs */
+		int idx, is_mtrr_mask;
+		u64 *pt;
+
+		idx = (msr - 0x200) / 2;
+		is_mtrr_mask = msr - 0x200 - 2 * idx;
+		if (!is_mtrr_mask)
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
+		else
+			pt =
+			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
+		*pdata = *pt;
+	}
+
 	return 0;
 }
 
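set_msr_mtrr() stores the MTRRdefType memory type and the two enable bits (FE, E) in separate fields, and get_msr_mtrr() recombines them for the guest. A minimal sketch of that round trip (illustration only, not part of the patch), assuming def_type in the vcpu's mtrr_state is the kernel's 8-bit mtrr_type field:

#include <stdio.h>

int main(void)
{
	unsigned long long data = 0xc06;	/* E=1, FE=1, default type WB */
	unsigned char def_type = data;		/* keeps only the low 8 bits  */
	unsigned char enabled = (data & 0xc00) >> 10;

	/* get_msr_mtrr() rebuilds the guest-visible value: */
	unsigned long long rdval = def_type + (enabled << 10);

	printf("def_type=%d enabled=%d readback=0x%llx\n",
	       def_type, enabled, rdval);	/* 6 3 0xc06 */
	return 0;
}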
@@ -3942,6 +3998,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* We do fxsave: this must be aligned. */
 	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
 
+	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);
 	if (r == 0)