@@ -2388,16 +2388,15 @@ done:
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-			 unsigned pt_access, unsigned pte_access,
-			 int write_fault, int *emulate, int level, gfn_t gfn,
-			 pfn_t pfn, bool speculative, bool host_writable)
+			 unsigned pte_access, int write_fault, int *emulate,
+			 int level, gfn_t gfn, pfn_t pfn, bool speculative,
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
 
-	pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n",
-		 __func__, *sptep, pt_access,
-		 write_fault, gfn);
+	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
+		 *sptep, write_fault, gfn);
 
 	if (is_rmap_spte(*sptep)) {
 		/*
@@ -2513,7 +2512,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;
 
 	for (i = 0; i < ret; i++, gfn++, start++)
-		mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL,
+		mmu_set_spte(vcpu, start, access, 0, NULL,
 			     sp->role.level, gfn, page_to_pfn(pages[i]),
 			     true, true);
 
@@ -2574,9 +2573,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			unsigned pte_access = ACC_ALL;
-
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
 				     write, &emulate, level, gfn, pfn,
 				     prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);