@@ -152,6 +152,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -160,6 +166,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
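
The new hooks only take effect once an arch init path installs a set of masks. As a sketch (illustrative call site, not part of this hunk; the 0ull execute-mask argument is an assumption), a caller that wants to preserve the classic x86 shadow pte bits would do:

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0ull);

With x_mask == 0, executable sptes gain no extra bit and non-executable ones get PT64_NX_MASK, which matches the open-coded logic that the mmu_set_spte() hunk below removes.
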
@@ -198,7 +221,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -513,7 +536,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1039,17 +1062,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_EXEC_MASK)
+		spte |= shadow_x_mask;
+	else
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
 
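
To convince oneself that the mask-based construction above is bit-for-bit equivalent to the removed open-coded version under the legacy masks, a throwaway user-space check can walk both paths (the bit positions are the standard x86 ones, hard-coded here purely for illustration):

	#include <assert.h>
	#include <stdint.h>

	#define PT_PRESENT_MASK	(1ULL << 0)
	#define PT_DIRTY_MASK	(1ULL << 6)
	#define PT64_NX_MASK	(1ULL << 63)

	int main(void)
	{
		/* legacy masks: base = present bit, nx = NX bit, x = nothing */
		uint64_t base = PT_PRESENT_MASK, nx = PT64_NX_MASK, x = 0;
		int exec;

		for (exec = 0; exec <= 1; exec++) {
			uint64_t old_spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
			uint64_t new_spte = base | PT_DIRTY_MASK;

			if (!exec)
				old_spte |= PT64_NX_MASK;	/* old: set NX unless exec */
			new_spte |= exec ? x : nx;		/* new: x_mask or nx_mask */
			assert(old_spte == new_spte);
		}
		return 0;
	}
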
@@ -1155,7 +1178,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			}
 
 			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-				| PT_WRITABLE_MASK | PT_USER_MASK;
+				| PT_WRITABLE_MASK | shadow_user_mask;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1599,7 +1622,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,