@@ -168,8 +168,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	int dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
@@ -249,7 +253,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address);
+	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -293,6 +297,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	kvm->mmu_notifier_count++;
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush |= kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
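A note on the first hunk, for review purposes: kvm_flush_remote_tlbs() now snapshots kvm->tlbs_dirty before issuing the remote flush request and afterwards clears the counter with cmpxchg() only if it still holds the snapshotted value, so dirtiness that accumulates while the flush is in flight keeps its own flush pending; the smp_mb() orders the snapshot against the request. The mmu-notifier hunks OR kvm->tlbs_dirty into need_tlb_flush so a deferred flush is still carried out on unmap. Below is a minimal user-space sketch of the same snapshot-and-conditional-reset pattern, written with C11 atomics instead of the kernel's smp_mb()/cmpxchg(); the names flush_remote_tlbs_demo and do_remote_flush are made up for the illustration, only the tlbs_dirty handling mirrors the patch.

/* Illustrative only -- not the kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tlbs_dirty;         /* stands in for kvm->tlbs_dirty       */
static atomic_long remote_tlb_flush;  /* stands in for the flush statistic   */

static bool do_remote_flush(void)
{
	/* In KVM this is make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH). */
	return true;
}

static void flush_remote_tlbs_demo(void)
{
	int dirty_count = atomic_load(&tlbs_dirty);

	/* Order the snapshot before the flush request (smp_mb() in the patch). */
	atomic_thread_fence(memory_order_seq_cst);

	if (do_remote_flush())
		atomic_fetch_add(&remote_tlb_flush, 1);

	/*
	 * Clear tlbs_dirty only if it still equals the snapshot; if another
	 * path bumped it during the flush, that request must not be lost.
	 */
	atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
}

int main(void)
{
	atomic_store(&tlbs_dirty, 3);
	flush_remote_tlbs_demo();
	printf("tlbs_dirty after flush: %d\n", atomic_load(&tlbs_dirty));
	return 0;
}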