@@ -702,12 +702,16 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
 	if (unlikely(vcpu->kvm->mmu_notifier_count))
 		return 1;
 	/*
-	 * Both reads happen under the mmu_lock and both values are
-	 * modified under mmu_lock, so there's no need of smb_rmb()
-	 * here in between, otherwise mmu_notifier_count should be
-	 * read before mmu_notifier_seq, see
-	 * mmu_notifier_invalidate_range_end write side.
+	 * Ensure the read of mmu_notifier_count happens before the read
+	 * of mmu_notifier_seq. This interacts with the smp_wmb() in
+	 * mmu_notifier_invalidate_range_end to make sure that the caller
+	 * either sees the old (non-zero) value of mmu_notifier_count or
+	 * the new (incremented) value of mmu_notifier_seq.
+	 * PowerPC Book3s HV KVM calls this under a per-page lock
+	 * rather than under kvm->mmu_lock, for scalability, so
+	 * can't rely on kvm->mmu_lock to keep things ordered.
 	 */
+	smp_rmb();
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
 		return 1;
 	return 0;
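
The new comment describes a paired-barrier invariant: the invalidate side bumps mmu_notifier_seq before dropping mmu_notifier_count, with an smp_wmb() in between, so a reader that checks the count first and the sequence second, with an smp_rmb() between the two reads, must observe at least one of the two signals that an invalidation is in flight or has completed. Below is a small stand-alone user-space model of that invariant, not KVM code: the names (notifier_seq, notifier_count, invalidate(), retry()) are made up for illustration, and C11 release/acquire fences stand in for the kernel's smp_wmb()/smp_rmb().

/*
 * User-space model of the ordering above; illustrative names only,
 * C11 fences standing in for smp_wmb()/smp_rmb().
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_ulong notifier_seq;	/* models kvm->mmu_notifier_seq */
static atomic_ulong notifier_count;	/* models kvm->mmu_notifier_count */

/* Write side: models an invalidate_range begin/end pair. */
static void *invalidate(void *arg)
{
	(void)arg;
	atomic_fetch_add_explicit(&notifier_count, 1, memory_order_relaxed);
	/* ... the invalidation itself would happen here ... */
	atomic_fetch_add_explicit(&notifier_seq, 1, memory_order_relaxed);
	/*
	 * Make the seq increment visible before the count decrement;
	 * this is the smp_wmb() the new comment refers to.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_sub_explicit(&notifier_count, 1, memory_order_relaxed);
	return NULL;
}

/* Read side: models mmu_notifier_retry() called without mmu_lock. */
static int retry(unsigned long snap_seq)
{
	if (atomic_load_explicit(&notifier_count, memory_order_relaxed))
		return 1;
	/* The smp_rmb() added by the patch: order the two reads. */
	atomic_thread_fence(memory_order_acquire);
	if (atomic_load_explicit(&notifier_seq, memory_order_relaxed) != snap_seq)
		return 1;
	return 0;
}

int main(void)
{
	/* A fault handler snapshots the sequence number up front... */
	unsigned long snap = atomic_load(&notifier_seq);
	pthread_t t;

	/* ...an invalidation runs meanwhile (serialized here for brevity)... */
	pthread_create(&t, NULL, invalidate, NULL);
	pthread_join(t, NULL);

	/* ...so the handler must notice it and retry. */
	printf("retry = %d\n", retry(snap));	/* prints 1 */
	return 0;
}

Build with "cc -pthread". The practical consequence, and what lets PowerPC Book3s HV call mmu_notifier_retry() under a per-page lock rather than kvm->mmu_lock, is that a fault handler racing with an invalidation always takes the retry path: it either observes a non-zero count or a changed sequence number, never neither.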