@@ -392,6 +392,13 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
 
+int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+			       void *data, int offset, int len, u32 access)
+{
+	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
+				       data, offset, len, access);
+}
+
 /*
  * Load the pae pdptrs.  Return true is they are all valid.
  */
@@ -403,8 +410,9 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	int ret;
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
-				  offset * sizeof(u64), sizeof(pdpte));
+	ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte,
+					 offset * sizeof(u64), sizeof(pdpte),
+					 PFERR_USER_MASK|PFERR_WRITE_MASK);
 	if (ret < 0) {
 		ret = 0;
 		goto out;
@@ -433,6 +441,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 	bool changed = true;
+	int offset;
+	gfn_t gfn;
 	int r;
 
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
@@ -442,7 +452,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 		(unsigned long *)&vcpu->arch.regs_avail))
 		return true;
 
-	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
+	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
+	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
+				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
 		goto out;
 	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;