@@ -2097,6 +2097,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
 	struct kvm_mmu_page *sp;
 
+	spin_lock(&kvm->mmu_lock);
 	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
 		int i;
 		u64 *pt;
@@ -2110,6 +2111,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 			if (pt[i] & PT_WRITABLE_MASK)
 				pt[i] &= ~PT_WRITABLE_MASK;
 	}
+	spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
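
Taken together, the two hunks bracket the entire shadow-page walk with kvm->mmu_lock, so the write-protect pass cannot race with concurrent MMU modifications. A minimal sketch of the function's shape after the patch; the code the diff elides between the two hunks is summarized as a comment, not reconstructed:

	void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
	{
		struct kvm_mmu_page *sp;

		spin_lock(&kvm->mmu_lock);	/* hunk 1: take the MMU lock */
		list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
			int i;
			u64 *pt;

			/* ... elided by the diff: skip shadow pages that do
			 * not back this slot, point pt at the page's entries,
			 * and loop i over them ... */

			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
		}
		spin_unlock(&kvm->mmu_lock);	/* hunk 2: release before return */
	}

Note that the unlock sits after the closing brace of the list walk, so every iteration, including the per-entry clearing of PT_WRITABLE_MASK, runs under the lock.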