@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 	 */
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
+
 	kvm->mmu_notifier_seq++;
 	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
 		kvm_flush_remote_tlbs(kvm);
 
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
 	need_tlb_flush |= kvm->tlbs_dirty;
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
 		kvm_flush_remote_tlbs(kvm);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
-	young = kvm_age_hva(kvm, address);
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
 
+	young = kvm_age_hva(kvm, address);
 	if (young)
 		kvm_flush_remote_tlbs(kvm);
 
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
 	return young;
 }
 
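For reference, here is how the clear_flush_young path reads once the last hunk is applied: all three hunks follow the same pattern of moving spin_unlock()/srcu_read_unlock() after the TLB flush, so the flush is done before the pages can be reused under a dropped mmu_lock. This is a sketch assembled from the hunk's context and added lines; the function prologue (the mmu_notifier_to_kvm() conversion and the local declarations) is not part of the hunk and is filled in from the surrounding kernel source.

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, address);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	/* flush before releasing mmu_lock, per this patch's reordering */
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}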